diff --git a/Cargo.lock b/Cargo.lock index fd6dacade5522..4de48ee9df609 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9492,6 +9492,7 @@ dependencies = [ "sui-protocol-config", "sui-sdk", "sui-simulator", + "sui-swarm", "sui-swarm-config", "sui-test-transaction-builder", "sui-types", @@ -11213,7 +11214,6 @@ dependencies = [ "move-core-types", "move-package", "mysten-metrics", - "mysten-network", "prometheus", "rand 0.8.5", "serde_json", diff --git a/crates/mysten-network/src/multiaddr.rs b/crates/mysten-network/src/multiaddr.rs index 632cad3b7bf83..902ef8f549395 100644 --- a/crates/mysten-network/src/multiaddr.rs +++ b/crates/mysten-network/src/multiaddr.rs @@ -84,6 +84,22 @@ impl Multiaddr { } } + // Converts a /ip{4,6}/-/tcp/-[/-] Multiaddr to SocketAddr. + // Useful when an external library only accepts SocketAddr, e.g. to start a local server. + // See `client::endpoint_from_multiaddr()` for converting to Endpoint for clients. + pub fn to_socket_addr(&self) -> Result { + let mut iter = self.iter(); + let ip = match iter.next().ok_or_else(|| { + eyre!("failed to convert to SocketAddr: Multiaddr does not contain IP") + })? { + Protocol::Ip4(ip4_addr) => IpAddr::V4(ip4_addr), + Protocol::Ip6(ip6_addr) => IpAddr::V6(ip6_addr), + unsupported => return Err(eyre!("unsupported protocol {unsupported}")), + }; + let tcp_port = parse_tcp(&mut iter)?; + Ok(SocketAddr::new(ip, tcp_port)) + } + /// Set the ip address to `0.0.0.0`. For instance, it converts the following address /// `/ip4/155.138.174.208/tcp/1500/http` into `/ip4/0.0.0.0/tcp/1500/http`. pub fn zero_ip_multi_address(&self) -> Self { @@ -151,23 +167,6 @@ impl<'de> serde::Deserialize<'de> for Multiaddr { } } -// Converts a /ip{4,6}/-/tcp/-[/-] Multiaddr to SocketAddr. -// Useful when an external library only accepts SocketAddr, e.g. to start a local server. -// See `client::endpoint_from_multiaddr()` for converting to Endpoint for clients. -pub fn to_socket_addr(addr: &Multiaddr) -> Result { - let mut iter = addr.iter(); - let ip = match iter - .next() - .ok_or_else(|| eyre!("failed to convert to SocketAddr: Multiaddr does not contain IP"))? 
- { - Protocol::Ip4(ip4_addr) => IpAddr::V4(ip4_addr), - Protocol::Ip6(ip6_addr) => IpAddr::V6(ip6_addr), - unsupported => return Err(eyre!("unsupported protocol {unsupported}")), - }; - let tcp_port = parse_tcp(&mut iter)?; - Ok(SocketAddr::new(ip, tcp_port)) -} - pub(crate) fn parse_tcp<'a, T: Iterator>>(protocols: &mut T) -> Result { if let Protocol::Tcp(port) = protocols .next() @@ -275,25 +274,29 @@ pub(crate) fn parse_unix(address: &Multiaddr) -> Result<(Cow<'_, str>, &'static #[cfg(test)] mod test { - use super::{to_socket_addr, Multiaddr}; + use super::Multiaddr; use multiaddr::multiaddr; #[test] fn test_to_socket_addr_basic() { let multi_addr_ipv4 = Multiaddr(multiaddr!(Ip4([127, 0, 0, 1]), Tcp(10500u16))); - let socket_addr_ipv4 = - to_socket_addr(&multi_addr_ipv4).expect("Couldn't convert to socket addr"); + let socket_addr_ipv4 = multi_addr_ipv4 + .to_socket_addr() + .expect("Couldn't convert to socket addr"); assert_eq!(socket_addr_ipv4.to_string(), "127.0.0.1:10500"); let multi_addr_ipv6 = Multiaddr(multiaddr!(Ip6([172, 0, 0, 1, 1, 1, 1, 1]), Tcp(10500u16))); - let socket_addr_ipv6 = - to_socket_addr(&multi_addr_ipv6).expect("Couldn't convert to socket addr"); + let socket_addr_ipv6 = multi_addr_ipv6 + .to_socket_addr() + .expect("Couldn't convert to socket addr"); assert_eq!(socket_addr_ipv6.to_string(), "[ac::1:1:1:1:1]:10500"); } #[test] fn test_to_socket_addr_unsupported_protocol() { let multi_addr_dns = Multiaddr(multiaddr!(Dnsaddr("mysten.sui"), Tcp(10500u16))); - let _ = to_socket_addr(&multi_addr_dns).expect_err("DNS is unsupported"); + let _ = multi_addr_dns + .to_socket_addr() + .expect_err("DNS is unsupported"); } } diff --git a/crates/sui-benchmark/Cargo.toml b/crates/sui-benchmark/Cargo.toml index 2bf5d056e8fcf..552335aaaaff1 100644 --- a/crates/sui-benchmark/Cargo.toml +++ b/crates/sui-benchmark/Cargo.toml @@ -38,6 +38,7 @@ sui-json-rpc-types = { path = "../sui-json-rpc-types" } sui-protocol-config = { path = "../sui-protocol-config" } sui-test-transaction-builder = { path = "../sui-test-transaction-builder" } sui-swarm-config = { path = "../sui-swarm-config" } +sui-swarm = { path = "../sui-swarm" } telemetry-subscribers.workspace = true roaring = "0.10.1" regex = "1.7.1" diff --git a/crates/sui-benchmark/src/benchmark_setup.rs b/crates/sui-benchmark/src/benchmark_setup.rs index fb75a1766c6d1..d819568b820df 100644 --- a/crates/sui-benchmark/src/benchmark_setup.rs +++ b/crates/sui-benchmark/src/benchmark_setup.rs @@ -1,24 +1,28 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 + use crate::bank::BenchmarkBank; use crate::options::Opts; use crate::util::get_ed25519_keypair_from_keystore; use crate::{FullNodeProxy, LocalValidatorAggregatorProxy, ValidatorProxy}; use anyhow::{anyhow, bail, Context, Result}; use prometheus::Registry; +use rand::rngs::OsRng; use rand::seq::SliceRandom; use std::path::PathBuf; use std::sync::Arc; use std::thread::JoinHandle; use std::time::Duration; -use sui_config::utils; +use sui_config::local_ip_utils; +use sui_config::node::ExpensiveSafetyCheckConfig; +use sui_swarm_config::node_config_builder::FullnodeConfigBuilder; use sui_types::base_types::ObjectID; use sui_types::base_types::SuiAddress; use sui_types::crypto::{deterministic_random_account_key, AccountKeyPair}; use sui_types::object::generate_max_test_gas_objects_with_owner; use sui_types::object::Owner; +use test_utils::authority::spawn_test_authorities; use test_utils::authority::test_and_configure_authority_configs_with_objects; -use test_utils::authority::{spawn_fullnode, spawn_test_authorities}; use tokio::runtime::Builder; use tokio::sync::{oneshot, Barrier}; use tokio::time::sleep; @@ -112,8 +116,8 @@ impl Env { .clone(); // Make the client runtime wait until we are done creating genesis objects let cloned_config = config.clone(); - let fullnode_ip = format!("{}", utils::get_local_ip_for_tests()); - let fullnode_rpc_port = utils::get_available_port(&fullnode_ip); + let fullnode_ip = local_ip_utils::localhost_for_testing(); + let fullnode_rpc_port = local_ip_utils::get_available_port(&fullnode_ip); let fullnode_barrier = Arc::new(Barrier::new(2)); let fullnode_barrier_clone = fullnode_barrier.clone(); // spawn a thread to spin up sui nodes on the multi-threaded server runtime. @@ -130,7 +134,17 @@ impl Env { server_runtime.block_on(async move { // Setup the network let _validators: Vec<_> = spawn_test_authorities(&cloned_config).await; - let _fullnode = spawn_fullnode(&cloned_config, Some(fullnode_rpc_port)).await; + + let node_config = FullnodeConfigBuilder::new() + .with_rpc_port(fullnode_rpc_port) + .with_expensive_safety_check_config( + ExpensiveSafetyCheckConfig::new_disable_all(), + ) + .build(&mut OsRng, &cloned_config); + let node = sui_swarm::memory::Node::new(node_config); + node.start().await.unwrap(); + let _fullnode = node.get_node_handle().unwrap(); + fullnode_barrier_clone.wait().await; barrier.wait().await; recv.await.expect("Unable to wait for terminate signal"); diff --git a/crates/sui-benchmark/tests/simtest.rs b/crates/sui-benchmark/tests/simtest.rs index 16975529f4a15..64be04304a175 100644 --- a/crates/sui-benchmark/tests/simtest.rs +++ b/crates/sui-benchmark/tests/simtest.rs @@ -4,6 +4,7 @@ #[cfg(msim)] mod test { use rand::{distributions::uniform::SampleRange, thread_rng, Rng}; + use std::collections::HashSet; use std::path::PathBuf; use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering}; @@ -102,16 +103,35 @@ mod test { test_simulated_load(TestInitData::new(&test_cluster).await, 120).await; } + /// Get a list of nodes that we don't want to kill in the crash recovery tests. + /// This includes the client node which is the node that is running the test, as well as + /// rpc fullnode which are needed to run the benchmark. + fn get_keep_alive_nodes(cluster: &TestCluster) -> HashSet { + let mut keep_alive_nodes = HashSet::new(); + // The first fullnode in the swarm ins the rpc fullnode. 
+ keep_alive_nodes.insert( + cluster + .swarm + .fullnodes() + .next() + .unwrap() + .get_node_handle() + .unwrap() + .with(|n| n.get_sim_node_id()), + ); + keep_alive_nodes.insert(sui_simulator::current_simnode_id()); + keep_alive_nodes + } + fn handle_failpoint( dead_validator: Arc>>, - client_node: sui_simulator::task::NodeId, + keep_alive_nodes: HashSet, probability: f64, ) { let mut dead_validator = dead_validator.lock().unwrap(); let cur_node = sui_simulator::current_simnode_id(); - // never kill the client node (which is running the test) - if cur_node == client_node { + if keep_alive_nodes.contains(&cur_node) { return; } @@ -167,7 +187,8 @@ mod test { let dead_validator_orig: Arc>> = Default::default(); let dead_validator = dead_validator_orig.clone(); - let client_node = sui_simulator::current_simnode_id(); + let keep_alive_nodes = get_keep_alive_nodes(&test_cluster); + let keep_alive_nodes_clone = keep_alive_nodes.clone(); register_fail_points( &[ "batch-write-before", @@ -180,20 +201,23 @@ mod test { "highest-executed-checkpoint", ], move || { - handle_failpoint(dead_validator.clone(), client_node, 0.02); + handle_failpoint(dead_validator.clone(), keep_alive_nodes_clone.clone(), 0.02); }, ); let dead_validator = dead_validator_orig.clone(); + let keep_alive_nodes_clone = keep_alive_nodes.clone(); register_fail_point_async("crash", move || { let dead_validator = dead_validator.clone(); + let keep_alive_nodes_clone = keep_alive_nodes_clone.clone(); async move { - handle_failpoint(dead_validator.clone(), client_node, 0.01); + handle_failpoint(dead_validator.clone(), keep_alive_nodes_clone.clone(), 0.01); } }); // Narwhal fail points. let dead_validator = dead_validator_orig.clone(); + let keep_alive_nodes_clone = keep_alive_nodes.clone(); register_fail_points( &[ "narwhal-rpc-response", @@ -201,7 +225,11 @@ mod test { "narwhal-store-after-write", ], move || { - handle_failpoint(dead_validator.clone(), client_node, 0.001); + handle_failpoint( + dead_validator.clone(), + keep_alive_nodes_clone.clone(), + 0.001, + ); }, ); register_fail_point_async("narwhal-delay", || delay_failpoint(10..20, 0.001)); @@ -215,9 +243,9 @@ mod test { let test_cluster = build_test_cluster(4, 10000).await; let dead_validator: Arc>> = Default::default(); - let client_node = sui_simulator::current_simnode_id(); + let keep_alive_nodes = get_keep_alive_nodes(&test_cluster); register_fail_points(&["before-open-new-epoch-store"], move || { - handle_failpoint(dead_validator.clone(), client_node, 1.0); + handle_failpoint(dead_validator.clone(), keep_alive_nodes.clone(), 1.0); }); test_simulated_load(TestInitData::new(&test_cluster).await, 120).await; } diff --git a/crates/sui-cluster-test/src/cluster.rs b/crates/sui-cluster-test/src/cluster.rs index 62ad61ebe7f20..087f05b5b535a 100644 --- a/crates/sui-cluster-test/src/cluster.rs +++ b/crates/sui-cluster-test/src/cluster.rs @@ -186,7 +186,7 @@ impl Cluster for LocalNewCluster { cluster_builder = cluster_builder.with_epoch_duration_ms(epoch_duration_ms); } if let Some(rpc_port) = fullnode_port { - cluster_builder = cluster_builder.set_fullnode_rpc_port(rpc_port); + cluster_builder = cluster_builder.with_fullnode_rpc_port(rpc_port); } let mut test_cluster = cluster_builder.build().await?; diff --git a/crates/sui-config/src/lib.rs b/crates/sui-config/src/lib.rs index cfe94f2d3b02e..5d0f8481099c6 100644 --- a/crates/sui-config/src/lib.rs +++ b/crates/sui-config/src/lib.rs @@ -11,11 +11,11 @@ use tracing::trace; pub mod certificate_deny_config; pub mod genesis; +pub mod 
local_ip_utils; pub mod node; pub mod node_config_metrics; pub mod p2p; pub mod transaction_deny_config; -pub mod utils; pub use node::{ConsensusConfig, NodeConfig}; diff --git a/crates/sui-config/src/local_ip_utils.rs b/crates/sui-config/src/local_ip_utils.rs new file mode 100644 index 0000000000000..a730891274c03 --- /dev/null +++ b/crates/sui-config/src/local_ip_utils.rs @@ -0,0 +1,140 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use std::net::SocketAddr; +#[cfg(msim)] +use std::sync::{atomic::AtomicI16, Arc}; +use sui_types::multiaddr::Multiaddr; + +/// A singleton struct to manage IP addresses and ports for simtest. +/// This allows us to generate unique IP addresses and ports for each node in simtest. +#[cfg(msim)] +pub struct SimAddressManager { + next_ip_offset: AtomicI16, + next_port: AtomicI16, +} + +#[cfg(msim)] +impl SimAddressManager { + pub fn new() -> Self { + Self { + next_ip_offset: AtomicI16::new(1), + next_port: AtomicI16::new(9000), + } + } + + pub fn get_next_ip(&self) -> String { + let offset = self + .next_ip_offset + .fetch_add(1, std::sync::atomic::Ordering::SeqCst); + // If offset ever goes beyond 255, we could use more bytes in the IP. + assert!(offset <= 255); + format!("10.10.0.{}", offset) + } + + pub fn get_next_available_port(&self) -> u16 { + self.next_port + .fetch_add(1, std::sync::atomic::Ordering::SeqCst) as u16 + } +} + +#[cfg(msim)] +fn get_sim_address_manager() -> Arc { + thread_local! { + // Uses Arc so that we could return a clone of the thread local singleton. + static SIM_ADDRESS_MANAGER: Arc = Arc::new(SimAddressManager::new()); + } + SIM_ADDRESS_MANAGER.with(|s| s.clone()) +} + +/// In simtest, we generate a new unique IP each time this function is called. +#[cfg(msim)] +pub fn get_new_ip() -> String { + get_sim_address_manager().get_next_ip() +} + +/// In non-simtest, we always only have one IP address which is localhost. +#[cfg(not(msim))] +pub fn get_new_ip() -> String { + localhost_for_testing() +} + +/// Returns localhost, which is always 127.0.0.1. +pub fn localhost_for_testing() -> String { + "127.0.0.1".to_string() +} + +/// Returns an available port for the given host in simtest. +/// We don't care about host because it's all managed by simulator. Just obtain a unique port. +#[cfg(msim)] +pub fn get_available_port(_host: &str) -> u16 { + get_sim_address_manager().get_next_available_port() +} + +/// Return an ephemeral, available port. On unix systems, the port returned will be in the +/// TIME_WAIT state ensuring that the OS won't hand out this port for some grace period. +/// Callers should be able to bind to this port given they use SO_REUSEADDR. 
+#[cfg(not(msim))] +pub fn get_available_port(host: &str) -> u16 { + const MAX_PORT_RETRIES: u32 = 1000; + + for _ in 0..MAX_PORT_RETRIES { + if let Ok(port) = get_ephemeral_port(host) { + return port; + } + } + + panic!("Error: could not find an available port"); +} + +#[cfg(not(msim))] +fn get_ephemeral_port(host: &str) -> std::io::Result { + use std::net::{TcpListener, TcpStream}; + + // Request a random available port from the OS + let listener = TcpListener::bind((host, 0))?; + let addr = listener.local_addr()?; + + // Create and accept a connection (which we'll promptly drop) in order to force the port + // into the TIME_WAIT state, ensuring that the port will be reserved from some limited + // amount of time (roughly 60s on some Linux systems) + let _sender = TcpStream::connect(addr)?; + let _incoming = listener.accept()?; + + Ok(addr.port()) +} + +/// Returns a new unique TCP address for the given host, by finding a new available port. +pub fn new_tcp_address_for_testing(host: &str) -> Multiaddr { + format!("/ip4/{}/tcp/{}/http", host, get_available_port(host)) + .parse() + .unwrap() +} + +/// Returns a new unique UDP address for the given host, by finding a new available port. +pub fn new_udp_address_for_testing(host: &str) -> Multiaddr { + format!("/ip4/{}/udp/{}", host, get_available_port(host)) + .parse() + .unwrap() +} + +/// Returns a new unique TCP address (SocketAddr) for localhost, by finding a new available port on localhost. +pub fn new_local_tcp_socket_for_testing() -> SocketAddr { + format!( + "{}:{}", + localhost_for_testing(), + get_available_port(&localhost_for_testing()) + ) + .parse() + .unwrap() +} + +/// Returns a new unique TCP address (Multiaddr) for localhost, by finding a new available port on localhost. +pub fn new_local_tcp_address_for_testing() -> Multiaddr { + new_tcp_address_for_testing(&localhost_for_testing()) +} + +/// Returns a new unique UDP address for localhost, by finding a new available port. +pub fn new_local_udp_address_for_testing() -> Multiaddr { + new_udp_address_for_testing(&localhost_for_testing()) +} diff --git a/crates/sui-config/src/node.rs b/crates/sui-config/src/node.rs index ce19a65cd2ed5..da0593e203288 100644 --- a/crates/sui-config/src/node.rs +++ b/crates/sui-config/src/node.rs @@ -354,6 +354,17 @@ impl ExpensiveSafetyCheckConfig { } } + pub fn new_disable_all() -> Self { + Self { + enable_epoch_sui_conservation_check: false, + enable_deep_per_tx_sui_conservation_check: false, + force_disable_epoch_sui_conservation_check: true, + enable_state_consistency_check: false, + force_disable_state_consistency_check: true, + enable_move_vm_paranoid_checks: false, + } + } + pub fn enable_paranoid_checks(&mut self) { self.enable_move_vm_paranoid_checks = true } diff --git a/crates/sui-config/src/utils.rs b/crates/sui-config/src/utils.rs deleted file mode 100644 index 4bb7eeeb7a365..0000000000000 --- a/crates/sui-config/src/utils.rs +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) Mysten Labs, Inc. -// SPDX-License-Identifier: Apache-2.0 - -use std::net::{IpAddr, TcpListener, TcpStream}; - -/// Return an ephemeral, available port. On unix systems, the port returned will be in the -/// TIME_WAIT state ensuring that the OS won't hand out this port for some grace period. -/// Callers should be able to bind to this port given they use SO_REUSEADDR. 
-pub fn get_available_port(host: &str) -> u16 { - const MAX_PORT_RETRIES: u32 = 1000; - - for _ in 0..MAX_PORT_RETRIES { - if let Ok(port) = get_ephemeral_port(host) { - return port; - } - } - - panic!("Error: could not find an available port"); -} - -fn get_ephemeral_port(host: &str) -> std::io::Result { - // Request a random available port from the OS - let listener = TcpListener::bind((host, 0))?; - let addr = listener.local_addr()?; - - // Create and accept a connection (which we'll promptly drop) in order to force the port - // into the TIME_WAIT state, ensuring that the port will be reserved from some limited - // amount of time (roughly 60s on some Linux systems) - let _sender = TcpStream::connect(addr)?; - let _incoming = listener.accept()?; - - Ok(addr.port()) -} - -pub fn new_tcp_network_address() -> sui_types::multiaddr::Multiaddr { - let host = format!("{}", get_local_ip_for_tests()); - format!("/ip4/{}/tcp/{}/http", host, get_available_port(&host)) - .parse() - .unwrap() -} - -pub fn new_udp_network_address() -> sui_types::multiaddr::Multiaddr { - let host = format!("{}", get_local_ip_for_tests()); - format!("/ip4/{}/udp/{}", host, get_available_port(&host)) - .parse() - .unwrap() -} - -pub fn available_local_socket_address() -> std::net::SocketAddr { - let host = "127.0.0.1"; - format!("{}:{}", host, get_available_port(host)) - .parse() - .unwrap() -} - -pub fn available_network_socket_address() -> std::net::SocketAddr { - let host = "127.0.0.1"; - format!("{}:{}", host, get_available_port(host)) - .parse() - .unwrap() -} - -pub fn socket_address_to_udp_multiaddr( - address: std::net::SocketAddr, -) -> sui_types::multiaddr::Multiaddr { - match address { - std::net::SocketAddr::V4(v4) => format!("/ip4/{}/udp/{}", v4.ip(), v4.port()), - std::net::SocketAddr::V6(v6) => format!("/ip6/{}/udp/{}", v6.ip(), v6.port()), - } - .parse() - .unwrap() -} - -#[cfg(msim)] -pub fn get_local_ip_for_tests() -> IpAddr { - let node = sui_simulator::runtime::NodeHandle::current(); - node.ip().expect("Current node should have an IP") -} - -#[cfg(not(msim))] -pub fn get_local_ip_for_tests() -> IpAddr { - "127.0.0.1".parse().unwrap() -} diff --git a/crates/sui-core/src/quorum_driver/reconfig_observer.rs b/crates/sui-core/src/quorum_driver/reconfig_observer.rs index db7407d65cc90..a5b814d919b91 100644 --- a/crates/sui-core/src/quorum_driver/reconfig_observer.rs +++ b/crates/sui-core/src/quorum_driver/reconfig_observer.rs @@ -113,7 +113,9 @@ impl ReconfigObserver for OnsiteReconfigObserver { Err(RecvError::Lagged(_)) => { continue; } - Err(RecvError::Closed) => panic!("Do not expect the channel to be closed"), + Err(RecvError::Closed) => { + panic!("Do not expect the channel to be closed") + } } } } diff --git a/crates/sui-core/src/test_utils.rs b/crates/sui-core/src/test_utils.rs index cd333836644ae..494aadbd1bafc 100644 --- a/crates/sui-core/src/test_utils.rs +++ b/crates/sui-core/src/test_utils.rs @@ -17,6 +17,7 @@ use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; use sui_config::genesis::Genesis; +use sui_config::local_ip_utils; use sui_framework::BuiltInFramework; use sui_genesis_builder::validator_info::ValidatorInfo; use sui_move_build::{BuildConfig, CompiledPackage, SuiPackageHooks}; @@ -233,10 +234,10 @@ async fn init_genesis( network_key: network_key_pair.public().clone(), gas_price: 1, commission_rate: 0, - network_address: sui_config::utils::new_tcp_network_address(), - p2p_address: sui_config::utils::new_udp_network_address(), - narwhal_primary_address: 
sui_config::utils::new_udp_network_address(), - narwhal_worker_address: sui_config::utils::new_udp_network_address(), + network_address: local_ip_utils::new_local_tcp_address_for_testing(), + p2p_address: local_ip_utils::new_local_udp_address_for_testing(), + narwhal_primary_address: local_ip_utils::new_local_udp_address_for_testing(), + narwhal_worker_address: local_ip_utils::new_local_udp_address_for_testing(), description: String::new(), image_url: String::new(), project_url: String::new(), diff --git a/crates/sui-framework/src/lib.rs b/crates/sui-framework/src/lib.rs index 19635d39dcd3d..8d5f660ef9d36 100644 --- a/crates/sui-framework/src/lib.rs +++ b/crates/sui-framework/src/lib.rs @@ -246,7 +246,7 @@ pub async fn compare_system_package( }; if let Err(e) = compatibility.check(&cur_module, &new_module) { - error!("Compatibility check failed, for new version of {id}: {e:?}"); + error!("Compatibility check failed, for new version of {id}::{name}: {e:?}"); return None; } } diff --git a/crates/sui-genesis-builder/src/lib.rs b/crates/sui-genesis-builder/src/lib.rs index 9cf8cbe759a07..e5618dfe4fd5c 100644 --- a/crates/sui-genesis-builder/src/lib.rs +++ b/crates/sui-genesis-builder/src/lib.rs @@ -1093,9 +1093,9 @@ mod test { use crate::Builder; use fastcrypto::traits::KeyPair; use sui_config::genesis::*; + use sui_config::local_ip_utils; use sui_config::node::DEFAULT_COMMISSION_RATE; use sui_config::node::DEFAULT_VALIDATOR_GAS_PRICE; - use sui_config::utils; use sui_types::base_types::SuiAddress; use sui_types::crypto::{ generate_proof_of_possession, get_key_pair_from_rng, AccountKeyPair, AuthorityKeyPair, @@ -1136,10 +1136,10 @@ mod test { network_key: network_key.public().clone(), gas_price: DEFAULT_VALIDATOR_GAS_PRICE, commission_rate: DEFAULT_COMMISSION_RATE, - network_address: utils::new_tcp_network_address(), - p2p_address: utils::new_udp_network_address(), - narwhal_primary_address: utils::new_udp_network_address(), - narwhal_worker_address: utils::new_udp_network_address(), + network_address: local_ip_utils::new_local_tcp_address_for_testing(), + p2p_address: local_ip_utils::new_local_udp_address_for_testing(), + narwhal_primary_address: local_ip_utils::new_local_udp_address_for_testing(), + narwhal_worker_address: local_ip_utils::new_local_udp_address_for_testing(), description: String::new(), image_url: String::new(), project_url: String::new(), diff --git a/crates/sui-json-rpc/tests/routing_tests.rs b/crates/sui-json-rpc/tests/routing_tests.rs index 817d3fa6f7cbe..9a861ef957c29 100644 --- a/crates/sui-json-rpc/tests/routing_tests.rs +++ b/crates/sui-json-rpc/tests/routing_tests.rs @@ -12,8 +12,7 @@ use jsonrpsee::RpcModule; use jsonrpsee_proc_macros::rpc; use prometheus::Registry; use std::env; -use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; -use sui_config::utils::get_available_port; +use sui_config::local_ip_utils; use sui_json_rpc::{JsonRpcServerBuilder, SuiRpcModule, CLIENT_TARGET_API_VERSION_HEADER}; use sui_open_rpc::Module; use sui_open_rpc_macros::open_rpc; @@ -23,15 +22,9 @@ async fn test_rpc_backward_compatibility() { let mut builder = JsonRpcServerBuilder::new("1.5", &Registry::new()); builder.register_module(TestApiModule).unwrap(); - let port = get_available_port("0.0.0.0"); - let _handle = builder - .start( - SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port)), - None, - ) - .await - .unwrap(); - let url = format!("http://0.0.0.0:{}", port); + let address = local_ip_utils::new_local_tcp_socket_for_testing(); + let _handle = builder.start(address, 
None).await.unwrap(); + let url = format!("http://0.0.0.0:{}", address.port()); // Test with un-versioned client let client = HttpClientBuilder::default().build(&url).unwrap(); @@ -108,15 +101,9 @@ async fn test_disable_routing() { let mut builder = JsonRpcServerBuilder::new("1.5", &Registry::new()); builder.register_module(TestApiModule).unwrap(); - let port = get_available_port("0.0.0.0"); - let _handle = builder - .start( - SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port)), - None, - ) - .await - .unwrap(); - let url = format!("http://0.0.0.0:{}", port); + let address = local_ip_utils::new_local_tcp_socket_for_testing(); + let _handle = builder.start(address, None).await.unwrap(); + let url = format!("http://0.0.0.0:{}", address.port()); // try to access old method directly should fail let client = HttpClientBuilder::default().build(&url).unwrap(); diff --git a/crates/sui-json-rpc/tests/subscription_tests.rs b/crates/sui-json-rpc/tests/subscription_tests.rs index b29c52b2488a6..a6719e5de6d0f 100644 --- a/crates/sui-json-rpc/tests/subscription_tests.rs +++ b/crates/sui-json-rpc/tests/subscription_tests.rs @@ -34,7 +34,7 @@ async fn test_subscribe_transaction() -> Result<(), anyhow::Error> { .unwrap(); let (_, _, digest) = wallet.create_devnet_nft(package_id).await; - wait_for_tx(digest, cluster.fullnode_handle.sui_node.state().clone()).await; + wait_for_tx(digest, cluster.fullnode_handle.sui_node.state()).await; // Wait for streaming let effects = match timeout(Duration::from_secs(5), sub.next()).await { diff --git a/crates/sui-node/src/handle.rs b/crates/sui-node/src/handle.rs index 3b02a45622fda..cf13c6006d325 100644 --- a/crates/sui-node/src/handle.rs +++ b/crates/sui-node/src/handle.rs @@ -45,8 +45,10 @@ use super::SuiNode; use std::future::Future; use std::sync::Arc; +use sui_core::authority::AuthorityState; /// Wrap SuiNode to allow correct access to SuiNode in simulator tests. +#[derive(Clone)] pub struct SuiNodeHandle(Option>); impl SuiNodeHandle { @@ -62,6 +64,10 @@ impl SuiNodeHandle { let _guard = self.guard(); cb(self.inner()) } + + pub fn state(&self) -> Arc { + self.with(|sui_node| sui_node.state()) + } } #[cfg(not(msim))] diff --git a/crates/sui-node/src/lib.rs b/crates/sui-node/src/lib.rs index a1b59c264594e..95c80eb89201e 100644 --- a/crates/sui-node/src/lib.rs +++ b/crates/sui-node/src/lib.rs @@ -1187,6 +1187,11 @@ impl SuiNode { ); new_epoch_store } + + #[cfg(msim)] + pub fn get_sim_node_id(&self) -> sui_simulator::task::NodeId { + self.sim_node.id() + } } /// Notify state-sync that a new list of trusted peers are now available. 
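For reference, a minimal sketch of how the relocated address helpers above fit together in test code. It assumes the `sui_types::multiaddr::Multiaddr` returned by `local_ip_utils` re-exports the mysten-network type that now carries `to_socket_addr()`, and that the method's error type is eyre's Result as shown earlier in this patch; the wrapper function name is hypothetical.

use sui_config::local_ip_utils;

fn pick_test_addresses() {
    // A unique local TCP Multiaddr, e.g. /ip4/127.0.0.1/tcp/<port>/http.
    let multiaddr = local_ip_utils::new_local_tcp_address_for_testing();

    // The free function `to_socket_addr(&addr)` removed above is now a method on Multiaddr.
    let socket_addr = multiaddr
        .to_socket_addr()
        .expect("an ip4/tcp multiaddr converts to SocketAddr");

    // Or skip Multiaddr entirely when only a SocketAddr is needed (e.g. a JSON-RPC listener).
    let rpc_addr = local_ip_utils::new_local_tcp_socket_for_testing();
    println!("listen on {socket_addr}, rpc on {rpc_addr}");
}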
diff --git a/crates/sui-rosetta/tests/rosetta_client.rs b/crates/sui-rosetta/tests/rosetta_client.rs index f8515457c7adb..855fb4d86dcbe 100644 --- a/crates/sui-rosetta/tests/rosetta_client.rs +++ b/crates/sui-rosetta/tests/rosetta_client.rs @@ -12,7 +12,7 @@ use serde::Serialize; use serde_json::Value; use tokio::task::JoinHandle; -use sui_config::utils; +use sui_config::local_ip_utils; use sui_keys::keystore::AccountKeystore; use sui_keys::keystore::Keystore; use sui_rosetta::operations::Operations; @@ -33,11 +33,11 @@ pub async fn start_rosetta_test_server( ) -> (RosettaClient, Vec>>) { let online_server = RosettaOnlineServer::new(SuiEnv::LocalNet, client); let offline_server = RosettaOfflineServer::new(SuiEnv::LocalNet); - let local_ip = utils::get_local_ip_for_tests().to_string(); - let port = utils::get_available_port(&local_ip); + let local_ip = local_ip_utils::localhost_for_testing(); + let port = local_ip_utils::get_available_port(&local_ip); let rosetta_address = format!("{}:{}", local_ip, port); let online_handle = online_server.serve(SocketAddr::from_str(&rosetta_address).unwrap()); - let offline_port = utils::get_available_port(&local_ip); + let offline_port = local_ip_utils::get_available_port(&local_ip); let offline_address = format!("{}:{}", local_ip, offline_port); let offline_handle = offline_server.serve(SocketAddr::from_str(&offline_address).unwrap()); diff --git a/crates/sui-surfer/src/surfer_state.rs b/crates/sui-surfer/src/surfer_state.rs index ad65dd14a2c1d..46da2adb44796 100644 --- a/crates/sui-surfer/src/surfer_state.rs +++ b/crates/sui-surfer/src/surfer_state.rs @@ -197,6 +197,12 @@ impl SurferState { async fn process_tx_effects(&mut self, effects: &SuiTransactionBlockEffects) { for (owned_ref, write_kind) in effects.all_changed_objects() { + if matches!(owned_ref.owner, Owner::ObjectOwner(_)) { + // For object owned objects, we don't need to do anything. + // We also cannot read them because in the case of shared objects, there can be + // races and the child object may no longer exist. + continue; + } let obj_ref = owned_ref.reference.to_object_ref(); let object = self .cluster diff --git a/crates/sui-swarm-config/Cargo.toml b/crates/sui-swarm-config/Cargo.toml index a46df147b8737..bf31799d8d4b4 100644 --- a/crates/sui-swarm-config/Cargo.toml +++ b/crates/sui-swarm-config/Cargo.toml @@ -36,6 +36,7 @@ sui-storage = { path = "../sui-storage" } sui-types = { path = "../sui-types" } sui-genesis-builder = { path = "../sui-genesis-builder" } workspace-hack = { version = "0.1", path = "../workspace-hack" } + [target.'cfg(msim)'.dependencies] sui-simulator = { path = "../sui-simulator" } diff --git a/crates/sui-swarm-config/src/genesis_config.rs b/crates/sui-swarm-config/src/genesis_config.rs index 7dade602159c1..ae8bb049125f3 100644 --- a/crates/sui-swarm-config/src/genesis_config.rs +++ b/crates/sui-swarm-config/src/genesis_config.rs @@ -1,7 +1,7 @@ // Copyright (c) Mysten Labs, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::net::SocketAddr; use anyhow::Result; use fastcrypto::traits::KeyPair; @@ -9,10 +9,9 @@ use rand::{rngs::StdRng, SeedableRng}; use serde::{Deserialize, Serialize}; use sui_config::genesis::{GenesisCeremonyParameters, TokenAllocation}; use sui_config::node::{DEFAULT_COMMISSION_RATE, DEFAULT_VALIDATOR_GAS_PRICE}; -use sui_config::utils; -use sui_config::Config; +use sui_config::{local_ip_utils, Config}; use sui_genesis_builder::validator_info::ValidatorInfo; -use sui_types::base_types::{ObjectID, SuiAddress}; +use sui_types::base_types::SuiAddress; use sui_types::crypto::{ get_key_pair_from_rng, AccountKeyPair, AuthorityKeyPair, AuthorityPublicKeyBytes, NetworkKeyPair, NetworkPublicKey, PublicKey, SuiKeyPair, @@ -45,127 +44,6 @@ pub struct ValidatorGenesisConfig { } impl ValidatorGenesisConfig { - pub const DEFAULT_NETWORK_PORT: u16 = 1000; - pub const DEFAULT_P2P_PORT: u16 = 2000; - pub const DEFAULT_P2P_LISTEN_PORT: u16 = 3000; - pub const DEFAULT_METRICS_PORT: u16 = 4000; - pub const DEFAULT_NARWHAL_METRICS_PORT: u16 = 5000; - pub const DEFAULT_NARWHAL_PRIMARY_PORT: u16 = 6000; - pub const DEFAULT_NARWHAL_WORKER_PORT: u16 = 7000; - - pub fn from_localhost_for_testing( - key_pair: AuthorityKeyPair, - worker_key_pair: NetworkKeyPair, - account_key_pair: SuiKeyPair, - network_key_pair: NetworkKeyPair, - gas_price: u64, - ) -> Self { - Self { - key_pair, - worker_key_pair, - account_key_pair, - network_key_pair, - network_address: utils::new_tcp_network_address(), - p2p_address: utils::new_udp_network_address(), - p2p_listen_address: None, - metrics_address: utils::available_local_socket_address(), - narwhal_metrics_address: utils::new_tcp_network_address(), - gas_price, - commission_rate: DEFAULT_COMMISSION_RATE, - narwhal_primary_address: utils::new_udp_network_address(), - narwhal_worker_address: utils::new_udp_network_address(), - consensus_address: utils::new_tcp_network_address(), - consensus_internal_worker_address: None, - stake: sui_types::governance::VALIDATOR_LOW_STAKE_THRESHOLD_MIST, - } - } - - pub fn from_base_ip( - key_pair: AuthorityKeyPair, - worker_key_pair: NetworkKeyPair, - account_key_pair: SuiKeyPair, - network_key_pair: NetworkKeyPair, - p2p_listen_address: Option, - ip: String, - // Port offset allows running many SuiNodes inside the same simulator node, which is - // helpful for tests that don't use Swarm. 
- port_offset: usize, - gas_price: u64, - ) -> Self { - assert!(port_offset < 1000); - let port_offset: u16 = port_offset.try_into().unwrap(); - let make_tcp_addr = - |port: u16| -> Multiaddr { format!("/ip4/{ip}/tcp/{port}/http").parse().unwrap() }; - let make_udp_addr = - |port: u16| -> Multiaddr { format!("/ip4/{ip}/udp/{port}").parse().unwrap() }; - let make_tcp_zero_addr = - |port: u16| -> Multiaddr { format!("/ip4/0.0.0.0/tcp/{port}/http").parse().unwrap() }; - - Self { - key_pair, - worker_key_pair, - account_key_pair, - network_key_pair, - network_address: make_tcp_addr(Self::DEFAULT_NETWORK_PORT + port_offset), - p2p_address: make_udp_addr(Self::DEFAULT_P2P_PORT + port_offset), - p2p_listen_address: p2p_listen_address - .map(|x| SocketAddr::new(x, Self::DEFAULT_P2P_LISTEN_PORT + port_offset)), - metrics_address: format!("0.0.0.0:{}", Self::DEFAULT_METRICS_PORT + port_offset) - .parse() - .unwrap(), - narwhal_metrics_address: make_tcp_zero_addr( - Self::DEFAULT_NARWHAL_METRICS_PORT + port_offset, - ), - gas_price, - commission_rate: DEFAULT_COMMISSION_RATE, - narwhal_primary_address: make_udp_addr( - Self::DEFAULT_NARWHAL_PRIMARY_PORT + port_offset, - ), - narwhal_worker_address: make_udp_addr(Self::DEFAULT_NARWHAL_WORKER_PORT + port_offset), - consensus_address: make_tcp_addr(4000 + port_offset), - consensus_internal_worker_address: None, - stake: sui_types::governance::VALIDATOR_LOW_STAKE_THRESHOLD_MIST, - } - } - - pub fn new( - index: usize, - key_pair: AuthorityKeyPair, - worker_key_pair: NetworkKeyPair, - account_key_pair: SuiKeyPair, - network_key_pair: NetworkKeyPair, - gas_price: u64, - ) -> Self { - if cfg!(msim) { - // we will probably never run this many validators in a sim - let low_octet = index + 1; - if low_octet > 255 { - todo!("smarter IP formatting required"); - } - - let ip = format!("10.10.0.{}", low_octet); - - Self::from_base_ip( - key_pair, - worker_key_pair, - account_key_pair, - network_key_pair, - None, - ip, - index, - gas_price, - ) - } else { - Self::from_localhost_for_testing( - key_pair, - worker_key_pair, - account_key_pair, - network_key_pair, - gas_price, - ) - } - } - pub fn to_validator_info(&self, name: String) -> ValidatorInfo { let protocol_key: AuthorityPublicKeyBytes = self.key_pair.public().into(); let account_key: PublicKey = self.account_key_pair.public(); @@ -192,6 +70,77 @@ impl ValidatorGenesisConfig { } } +#[derive(Default)] +pub struct ValidatorGenesisConfigBuilder { + protocol_key_pair: Option, + account_key_pair: Option, + ip: Option, + gas_price: Option, +} + +impl ValidatorGenesisConfigBuilder { + pub fn new() -> Self { + Self::default() + } + + pub fn with_protocol_key_pair(mut self, key_pair: AuthorityKeyPair) -> Self { + self.protocol_key_pair = Some(key_pair); + self + } + + pub fn with_account_key_pair(mut self, key_pair: AccountKeyPair) -> Self { + self.account_key_pair = Some(key_pair); + self + } + + pub fn with_ip(mut self, ip: String) -> Self { + self.ip = Some(ip); + self + } + + pub fn with_gas_price(mut self, gas_price: u64) -> Self { + self.gas_price = Some(gas_price); + self + } + + pub fn build(self, rng: &mut R) -> ValidatorGenesisConfig { + let ip = self.ip.unwrap_or_else(local_ip_utils::get_new_ip); + let localhost = local_ip_utils::localhost_for_testing(); + + let protocol_key_pair = self + .protocol_key_pair + .unwrap_or_else(|| get_key_pair_from_rng(rng).1); + let account_key_pair = self + .account_key_pair + .unwrap_or_else(|| get_key_pair_from_rng(rng).1); + let gas_price = 
self.gas_price.unwrap_or(DEFAULT_VALIDATOR_GAS_PRICE); + + let (worker_key_pair, network_key_pair): (NetworkKeyPair, NetworkKeyPair) = + (get_key_pair_from_rng(rng).1, get_key_pair_from_rng(rng).1); + + ValidatorGenesisConfig { + key_pair: protocol_key_pair, + worker_key_pair, + account_key_pair: account_key_pair.into(), + network_key_pair, + network_address: local_ip_utils::new_tcp_address_for_testing(&ip), + p2p_address: local_ip_utils::new_udp_address_for_testing(&ip), + p2p_listen_address: None, + metrics_address: local_ip_utils::new_tcp_address_for_testing(&localhost) + .to_socket_addr() + .unwrap(), + narwhal_metrics_address: local_ip_utils::new_tcp_address_for_testing(&localhost), + gas_price, + commission_rate: DEFAULT_COMMISSION_RATE, + narwhal_primary_address: local_ip_utils::new_udp_address_for_testing(&ip), + narwhal_worker_address: local_ip_utils::new_udp_address_for_testing(&ip), + consensus_address: local_ip_utils::new_tcp_address_for_testing(&ip), + consensus_internal_worker_address: None, + stake: sui_types::governance::VALIDATOR_LOW_STAKE_THRESHOLD_MIST, + } + } +} + #[derive(Serialize, Deserialize, Default)] pub struct GenesisConfig { pub validator_config_info: Option>, @@ -238,14 +187,11 @@ impl GenesisConfig { } fn default_socket_address() -> SocketAddr { - utils::available_local_socket_address() + local_ip_utils::new_local_tcp_socket_for_testing() } fn default_multiaddr_address() -> Multiaddr { - let addr = utils::available_local_socket_address(); - format!("/ip4/{:?}/tcp/{}/http", addr.ip(), addr.port()) - .parse() - .unwrap() + local_ip_utils::new_local_tcp_address_for_testing() } fn default_stake() -> u64 { @@ -325,16 +271,9 @@ impl GenesisConfig { let validator_config_info: Vec<_> = ips .iter() .map(|ip| { - ValidatorGenesisConfig::from_base_ip( - AuthorityKeyPair::generate(&mut rng), // key_pair - NetworkKeyPair::generate(&mut rng), // worker_key_pair - SuiKeyPair::Ed25519(NetworkKeyPair::generate(&mut rng)), // account_key_pair - NetworkKeyPair::generate(&mut rng), // network_key_pair - Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), // p2p_listen_address - ip.to_string(), - Self::BENCHMARKS_PORT_OFFSET, - DEFAULT_VALIDATOR_GAS_PRICE, - ) + ValidatorGenesisConfigBuilder::new() + .with_ip(ip.to_string()) + .build(&mut rng) }) .collect(); @@ -372,14 +311,4 @@ impl GenesisConfig { let mut rng = StdRng::seed_from_u64(Self::BENCHMARKS_RNG_SEED); SuiKeyPair::Ed25519(NetworkKeyPair::generate(&mut rng)) } - - /// Generate several predictable and fixed gas object id offsets for benchmarks. Load generators - /// and other benchmark facilities may also need to retrieve these id offsets (hence the importance - /// of the seedable rng). 
- pub fn benchmark_gas_object_id_offsets(quantity: usize) -> Vec { - let mut rng = StdRng::seed_from_u64(Self::BENCHMARKS_RNG_SEED); - (0..quantity) - .map(|_| ObjectID::random_from_rng(&mut rng)) - .collect() - } } diff --git a/crates/sui-swarm-config/src/lib.rs b/crates/sui-swarm-config/src/lib.rs index 1d3205326c7ae..596ff86d49a7c 100644 --- a/crates/sui-swarm-config/src/lib.rs +++ b/crates/sui-swarm-config/src/lib.rs @@ -4,3 +4,4 @@ pub mod genesis_config; pub mod network_config; pub mod network_config_builder; +pub mod node_config_builder; diff --git a/crates/sui-swarm-config/src/network_config_builder.rs b/crates/sui-swarm-config/src/network_config_builder.rs index 43d9b6bde8373..dbcd2bf60315e 100644 --- a/crates/sui-swarm-config/src/network_config_builder.rs +++ b/crates/sui-swarm-config/src/network_config_builder.rs @@ -1,40 +1,18 @@ // Copyright (c) Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::genesis_config::{AccountConfig, DEFAULT_GAS_AMOUNT}; +use crate::genesis_config::{AccountConfig, ValidatorGenesisConfigBuilder, DEFAULT_GAS_AMOUNT}; use crate::genesis_config::{GenesisConfig, ValidatorGenesisConfig}; use crate::network_config::NetworkConfig; -use fastcrypto::encoding::{Encoding, Hex}; -use narwhal_config::{ - NetworkAdminServerParameters, Parameters as ConsensusParameters, PrometheusMetricsParameters, -}; +use crate::node_config_builder::ValidatorConfigBuilder; use rand::rngs::OsRng; -use rand::RngCore; -use std::net::{IpAddr, SocketAddr}; -use std::{ - num::NonZeroUsize, - path::{Path, PathBuf}, - sync::Arc, -}; +use std::path::PathBuf; +use std::{num::NonZeroUsize, path::Path, sync::Arc}; use sui_config::genesis::{TokenAllocation, TokenDistributionScheduleBuilder}; -use sui_config::node::{ - default_enable_index_processing, default_end_of_epoch_broadcast_channel_capacity, - AuthorityKeyPairWithPath, AuthorityStorePruningConfig, DBCheckpointConfig, - ExpensiveSafetyCheckConfig, KeyPairWithPath, StateDebugDumpConfig, - DEFAULT_GRPC_CONCURRENCY_LIMIT, DEFAULT_VALIDATOR_GAS_PRICE, -}; -use sui_config::utils; -use sui_config::{ - p2p::{P2pConfig, SeedPeer}, - ConsensusConfig, NodeConfig, AUTHORITIES_DB_NAME, CONSENSUS_DB_NAME, -}; use sui_protocol_config::SupportedProtocolVersions; use sui_types::base_types::{AuthorityName, SuiAddress}; use sui_types::committee::{Committee, ProtocolVersion}; -use sui_types::crypto::{ - generate_proof_of_possession, get_key_pair_from_rng, AccountKeyPair, AuthorityKeyPair, - AuthorityPublicKeyBytes, KeypairTraits, NetworkKeyPair, PublicKey, SuiKeyPair, -}; +use sui_types::crypto::{generate_proof_of_possession, AccountKeyPair, KeypairTraits, PublicKey}; use sui_types::object::Object; pub enum CommitteeConfig { @@ -43,11 +21,6 @@ pub enum CommitteeConfig { AccountKeys(Vec), } -enum ValidatorIpSelection { - Localhost, - Simulator, -} - pub type SupportedProtocolVersionsCallback = Arc< dyn Fn( usize, /* validator idx */ @@ -65,24 +38,18 @@ pub enum ProtocolVersionsConfig { // Use one range for all validators. Global(SupportedProtocolVersions), // A closure that returns the versions for each validator. + // TODO: This doesn't apply to fullnodes. 
PerValidator(SupportedProtocolVersionsCallback), } pub struct ConfigBuilder { rng: Option, config_directory: PathBuf, - randomize_ports: bool, - committee: Option, + supported_protocol_versions_config: Option, + committee: CommitteeConfig, genesis_config: Option, reference_gas_price: Option, additional_objects: Vec, - with_swarm: bool, - validator_ip_sel: ValidatorIpSelection, - // the versions that are supported by each validator - supported_protocol_versions_config: ProtocolVersionsConfig, - - db_checkpoint_config: DBCheckpointConfig, - state_debug_dump_config: StateDebugDumpConfig, } impl ConfigBuilder { @@ -90,22 +57,11 @@ impl ConfigBuilder { Self { rng: Some(OsRng), config_directory: config_directory.as_ref().into(), - randomize_ports: true, - committee: Some(CommitteeConfig::Size(NonZeroUsize::new(1).unwrap())), + supported_protocol_versions_config: None, + committee: CommitteeConfig::Size(NonZeroUsize::new(1).unwrap()), genesis_config: None, reference_gas_price: None, additional_objects: vec![], - with_swarm: false, - // Set a sensible default here so that most tests can run with or without the - // simulator. - validator_ip_sel: if cfg!(msim) { - ValidatorIpSelection::Simulator - } else { - ValidatorIpSelection::Localhost - }, - supported_protocol_versions_config: ProtocolVersionsConfig::Default, - db_checkpoint_config: DBCheckpointConfig::default(), - state_debug_dump_config: StateDebugDumpConfig::default(), } } @@ -115,33 +71,23 @@ impl ConfigBuilder { } impl ConfigBuilder { - pub fn randomize_ports(mut self, randomize_ports: bool) -> Self { - self.randomize_ports = randomize_ports; - self - } - - pub fn with_swarm(mut self) -> Self { - self.with_swarm = true; - self - } - pub fn committee(mut self, committee: CommitteeConfig) -> Self { - self.committee = Some(committee); + self.committee = committee; self } pub fn committee_size(mut self, committee_size: NonZeroUsize) -> Self { - self.committee = Some(CommitteeConfig::Size(committee_size)); + self.committee = CommitteeConfig::Size(committee_size); self } pub fn with_validator_account_keys(mut self, keys: Vec) -> Self { - self.committee = Some(CommitteeConfig::AccountKeys(keys)); + self.committee = CommitteeConfig::AccountKeys(keys); self } pub fn with_validators(mut self, validators: Vec) -> Self { - self.committee = Some(CommitteeConfig::Validators(validators)); + self.committee = CommitteeConfig::Validators(validators); self } @@ -181,7 +127,7 @@ impl ConfigBuilder { } pub fn with_supported_protocol_versions(mut self, c: SupportedProtocolVersions) -> Self { - self.supported_protocol_versions_config = ProtocolVersionsConfig::Global(c); + self.supported_protocol_versions_config = Some(ProtocolVersionsConfig::Global(c)); self } @@ -189,22 +135,12 @@ impl ConfigBuilder { mut self, func: SupportedProtocolVersionsCallback, ) -> Self { - self.supported_protocol_versions_config = ProtocolVersionsConfig::PerValidator(func); + self.supported_protocol_versions_config = Some(ProtocolVersionsConfig::PerValidator(func)); self } pub fn with_supported_protocol_versions_config(mut self, c: ProtocolVersionsConfig) -> Self { - self.supported_protocol_versions_config = c; - self - } - - pub fn with_db_checkpoint_config(mut self, db_checkpoint_config: DBCheckpointConfig) -> Self { - self.db_checkpoint_config = db_checkpoint_config; - self - } - - pub fn with_debug_dump_config(mut self, state_debug_dump_config: StateDebugDumpConfig) -> Self { - self.state_debug_dump_config = state_debug_dump_config; + self.supported_protocol_versions_config = 
Some(c); self } @@ -212,16 +148,11 @@ impl ConfigBuilder { ConfigBuilder { rng: Some(rng), config_directory: self.config_directory, - randomize_ports: self.randomize_ports, + supported_protocol_versions_config: self.supported_protocol_versions_config, committee: self.committee, genesis_config: self.genesis_config, reference_gas_price: self.reference_gas_price, additional_objects: self.additional_objects, - with_swarm: self.with_swarm, - validator_ip_sel: self.validator_ip_sel, - supported_protocol_versions_config: self.supported_protocol_versions_config, - db_checkpoint_config: self.db_checkpoint_config, - state_debug_dump_config: self.state_debug_dump_config, } } @@ -235,30 +166,10 @@ impl ConfigBuilder { impl ConfigBuilder { //TODO right now we always randomize ports, we may want to have a default port configuration - pub fn build(mut self) -> NetworkConfig { - let committee = self.committee.take().unwrap(); - - let mut rng = self.rng.take().unwrap(); - - let validator_with_account_key = |idx: usize, - protocol_key_pair: AuthorityKeyPair, - account_key_pair: AccountKeyPair, - rng: &mut R| - -> ValidatorGenesisConfig { - let (worker_key_pair, network_key_pair): (NetworkKeyPair, NetworkKeyPair) = - (get_key_pair_from_rng(rng).1, get_key_pair_from_rng(rng).1); - - ValidatorGenesisConfig::new( - idx, - protocol_key_pair, - worker_key_pair, - account_key_pair.into(), - network_key_pair, - self.reference_gas_price - .unwrap_or(DEFAULT_VALIDATOR_GAS_PRICE), - ) - }; + pub fn build(self) -> NetworkConfig { + let committee = self.committee; + let mut rng = self.rng.unwrap(); let validators = match committee { CommitteeConfig::Size(size) => { // We always get fixed protocol keys from this function (which is isolated from @@ -268,11 +179,13 @@ impl ConfigBuilder { let (_, keys) = Committee::new_simple_test_committee_of_size(size.into()); keys.into_iter() - .enumerate() - .map(|(i, authority_key)| { - let account_key_pair = - get_key_pair_from_rng::(&mut rng).1; - validator_with_account_key(i, authority_key, account_key_pair, &mut rng) + .map(|authority_key| { + let mut builder = ValidatorGenesisConfigBuilder::new() + .with_protocol_key_pair(authority_key); + if let Some(rgp) = self.reference_gas_price { + builder = builder.with_gas_price(rgp); + } + builder.build(&mut rng) }) .collect::>() } @@ -284,24 +197,22 @@ impl ConfigBuilder { let (_, protocol_keys) = Committee::new_simple_test_committee_of_size(keys.len()); keys.into_iter() .zip(protocol_keys.into_iter()) - .enumerate() - .map(|(i, (account_key, protocol_key))| { - validator_with_account_key(i, protocol_key, account_key, &mut rng) + .map(|(account_key, protocol_key)| { + let mut builder = ValidatorGenesisConfigBuilder::new() + .with_protocol_key_pair(protocol_key) + .with_account_key_pair(account_key); + if let Some(rgp) = self.reference_gas_price { + builder = builder.with_gas_price(rgp); + } + builder.build(&mut rng) }) .collect::>() } }; - self.build_with_validators(rng, validators) - } - - fn build_with_validators( - mut self, - mut rng: R, - validators: Vec, - ) -> NetworkConfig { - self.get_or_init_genesis_config(); - let genesis_config = self.genesis_config.unwrap(); + let genesis_config = self + .genesis_config + .unwrap_or_else(GenesisConfig::for_local_testing); let (account_keys, allocations) = genesis_config.generate_accounts(&mut rng).unwrap(); @@ -359,106 +270,20 @@ impl ConfigBuilder { .into_iter() .enumerate() .map(|(idx, validator)| { - let public_key: AuthorityPublicKeyBytes = validator.key_pair.public().into(); - let mut 
key_path = Hex::encode(public_key); - key_path.truncate(12); - let db_path = self - .config_directory - .join(AUTHORITIES_DB_NAME) - .join(key_path.clone()); - let network_address = validator.network_address; - let consensus_address = validator.consensus_address; - let consensus_db_path = - self.config_directory.join(CONSENSUS_DB_NAME).join(key_path); - let internal_worker_address = validator.consensus_internal_worker_address; - let consensus_config = ConsensusConfig { - address: consensus_address, - db_path: consensus_db_path, - internal_worker_address, - max_pending_transactions: None, - max_submit_position: None, - submit_delay_step_override_millis: None, - narwhal_config: ConsensusParameters { - network_admin_server: match self.validator_ip_sel { - ValidatorIpSelection::Simulator => NetworkAdminServerParameters { - primary_network_admin_server_port: 8889, - worker_network_admin_server_base_port: 8890, - }, - _ => NetworkAdminServerParameters { - primary_network_admin_server_port: utils::get_available_port( - "127.0.0.1", - ), - worker_network_admin_server_base_port: utils::get_available_port( - "127.0.0.1", - ), - }, - }, - prometheus_metrics: PrometheusMetricsParameters { - socket_addr: validator.narwhal_metrics_address, - }, - ..Default::default() - }, - }; - - let p2p_config = P2pConfig { - listen_address: validator.p2p_listen_address.unwrap_or_else(|| { - validator - .p2p_address - .udp_multiaddr_to_listen_address() - .unwrap() - }), - external_address: Some(validator.p2p_address), - ..Default::default() - }; - - let supported_protocol_versions = match &self.supported_protocol_versions_config { - ProtocolVersionsConfig::Default => SupportedProtocolVersions::SYSTEM_DEFAULT, - ProtocolVersionsConfig::Global(v) => *v, - ProtocolVersionsConfig::PerValidator(func) => func(idx, Some(public_key)), - }; - - NodeConfig { - protocol_key_pair: AuthorityKeyPairWithPath::new(validator.key_pair), - network_key_pair: KeyPairWithPath::new(SuiKeyPair::Ed25519( - validator.network_key_pair, - )), - account_key_pair: KeyPairWithPath::new(validator.account_key_pair), - worker_key_pair: KeyPairWithPath::new(SuiKeyPair::Ed25519( - validator.worker_key_pair, - )), - db_path, - network_address, - metrics_address: validator.metrics_address, - // TODO: admin server is hard coded to start on 127.0.0.1 - we should probably - // provide the entire socket address here to avoid confusion. 
- admin_interface_port: match self.validator_ip_sel { - ValidatorIpSelection::Simulator => 8888, - _ => utils::get_available_port("127.0.0.1"), - }, - json_rpc_address: utils::available_local_socket_address(), - consensus_config: Some(consensus_config), - enable_event_processing: false, - enable_index_processing: default_enable_index_processing(), - genesis: sui_config::node::Genesis::new(genesis.clone()), - grpc_load_shed: None, - grpc_concurrency_limit: Some(DEFAULT_GRPC_CONCURRENCY_LIMIT), - p2p_config, - authority_store_pruning_config: AuthorityStorePruningConfig::validator_config(), - end_of_epoch_broadcast_channel_capacity: - default_end_of_epoch_broadcast_channel_capacity(), - checkpoint_executor_config: Default::default(), - metrics: None, - supported_protocol_versions: Some(supported_protocol_versions), - db_checkpoint_config: self.db_checkpoint_config.clone(), - indirect_objects_threshold: usize::MAX, - expensive_safety_check_config: ExpensiveSafetyCheckConfig::new_enable_all(), - name_service_package_address: None, - name_service_registry_id: None, - name_service_reverse_registry_id: None, - transaction_deny_config: Default::default(), - certificate_deny_config: Default::default(), - state_debug_dump_config: self.state_debug_dump_config.clone(), + let mut builder = ValidatorConfigBuilder::new(self.config_directory.clone()); + if let Some(spvc) = &self.supported_protocol_versions_config { + let supported_versions = match spvc { + ProtocolVersionsConfig::Default => { + SupportedProtocolVersions::SYSTEM_DEFAULT + } + ProtocolVersionsConfig::Global(v) => *v, + ProtocolVersionsConfig::PerValidator(func) => { + func(idx, Some(validator.key_pair.public().into())) + } + }; + builder = builder.with_supported_protocol_versions(supported_versions); } + builder.build(validator, genesis.clone()) }) .collect(); NetworkConfig { @@ -517,224 +342,6 @@ mod tests { } } -pub struct FullnodeConfigBuilder<'a> { - network_config: &'a NetworkConfig, - dir: Option, - enable_event_store: bool, - listen_ip: Option, - // port for main network_address - port: Option, - // port for p2p data sync - p2p_port: Option, - // port for json rpc api - rpc_port: Option, - // port for admin interface - admin_port: Option, - supported_protocol_versions_config: ProtocolVersionsConfig, - db_checkpoint_config: DBCheckpointConfig, -} - -impl<'a> FullnodeConfigBuilder<'a> { - pub fn new(network_config: &'a NetworkConfig) -> Self { - Self { - network_config, - dir: None, - enable_event_store: false, - listen_ip: None, - port: None, - p2p_port: None, - rpc_port: None, - admin_port: None, - supported_protocol_versions_config: ProtocolVersionsConfig::Default, - db_checkpoint_config: DBCheckpointConfig::default(), - } - } - - // The EventStore uses a non-deterministic async pool which breaks determinism in - // the simulator, so do not enable with_event_store in tests unless the test specifically - // requires events. - // TODO: In the simulator, we may be able to run event store in a separate thread and make - // blocking calls to it to fix this. 
- pub fn with_event_store(mut self) -> Self { - self.enable_event_store = true; - self - } - - pub fn with_listen_ip(mut self, ip: IpAddr) -> Self { - self.listen_ip = Some(ip); - self - } - - pub fn with_port(mut self, port: u16) -> Self { - self.port = Some(port); - self - } - - pub fn with_p2p_port(mut self, port: u16) -> Self { - self.p2p_port = Some(port); - self - } - - pub fn with_rpc_port(mut self, port: u16) -> Self { - self.rpc_port = Some(port); - self - } - - pub fn set_rpc_port(mut self, port: Option) -> Self { - self.rpc_port = port; - self - } - - pub fn with_admin_port(mut self, port: u16) -> Self { - self.admin_port = Some(port); - self - } - - pub fn set_event_store(mut self, status: bool) -> Self { - self.enable_event_store = status; - self - } - - pub fn with_dir(mut self, dir: PathBuf) -> Self { - self.dir = Some(dir); - self - } - - pub fn with_random_dir(mut self) -> Self { - self.dir = None; - self - } - - pub fn with_supported_protocol_versions(mut self, c: SupportedProtocolVersions) -> Self { - self.supported_protocol_versions_config = ProtocolVersionsConfig::Global(c); - self - } - - pub fn with_supported_protocol_version_callback( - mut self, - func: SupportedProtocolVersionsCallback, - ) -> Self { - self.supported_protocol_versions_config = ProtocolVersionsConfig::PerValidator(func); - self - } - - pub fn with_supported_protocol_versions_config(mut self, c: ProtocolVersionsConfig) -> Self { - self.supported_protocol_versions_config = c; - self - } - - pub fn with_db_checkpoint_config(mut self, db_checkpoint_config: DBCheckpointConfig) -> Self { - self.db_checkpoint_config = db_checkpoint_config; - self - } - - pub fn build(self) -> Result { - let protocol_key_pair = get_key_pair_from_rng::(&mut OsRng).1; - let worker_key_pair = get_key_pair_from_rng::(&mut OsRng).1; - let account_key_pair = get_key_pair_from_rng::(&mut OsRng).1; - let network_key_pair = get_key_pair_from_rng::(&mut OsRng).1; - let validator_configs = &self.network_config.validator_configs; - let validator_config = &validator_configs[0]; - - let mut db_path = validator_config.db_path.clone(); - db_path.pop(); - - let dir_name = self - .dir - .unwrap_or_else(|| OsRng.next_u32().to_string().into()); - - let listen_ip = self.listen_ip.unwrap_or_else(utils::get_local_ip_for_tests); - let listen_ip_str = format!("{}", listen_ip); - - let get_available_port = |public_port| { - if listen_ip.is_loopback() || listen_ip == utils::get_local_ip_for_tests() { - utils::get_available_port(&listen_ip_str) - } else { - public_port - } - }; - - let network_address = format!( - "/ip4/{}/tcp/{}/http", - listen_ip, - self.port.unwrap_or_else(|| get_available_port(8080)) - ) - .parse() - .unwrap(); - - let p2p_config = { - let address = SocketAddr::new( - listen_ip, - self.p2p_port.unwrap_or_else(|| get_available_port(8084)), - ); - let seed_peers = validator_configs - .iter() - .map(|config| SeedPeer { - peer_id: Some(anemo::PeerId( - config.network_key_pair().public().0.to_bytes(), - )), - address: config.p2p_config.external_address.clone().unwrap(), - }) - .collect(); - - P2pConfig { - listen_address: address, - external_address: Some(utils::socket_address_to_udp_multiaddr(address)), - seed_peers, - ..Default::default() - } - }; - - let rpc_port = self.rpc_port.unwrap_or_else(|| get_available_port(9000)); - let jsonrpc_server_url = format!("{}:{}", listen_ip, rpc_port); - let json_rpc_address: SocketAddr = jsonrpc_server_url.parse().unwrap(); - - let supported_protocol_versions = match 
&self.supported_protocol_versions_config { - ProtocolVersionsConfig::Default => SupportedProtocolVersions::SYSTEM_DEFAULT, - ProtocolVersionsConfig::Global(v) => *v, - ProtocolVersionsConfig::PerValidator(func) => func(0, None), - }; - - Ok(NodeConfig { - protocol_key_pair: AuthorityKeyPairWithPath::new(protocol_key_pair), - account_key_pair: KeyPairWithPath::new(SuiKeyPair::Ed25519(account_key_pair)), - worker_key_pair: KeyPairWithPath::new(SuiKeyPair::Ed25519(worker_key_pair)), - network_key_pair: KeyPairWithPath::new(SuiKeyPair::Ed25519(network_key_pair)), - - db_path: db_path.join(dir_name), - network_address, - metrics_address: utils::available_local_socket_address(), - // TODO: admin server is hard coded to start on 127.0.0.1 - we should probably - // provide the entire socket address here to avoid confusion. - admin_interface_port: self.admin_port.unwrap_or_else(|| get_available_port(8888)), - json_rpc_address, - consensus_config: None, - enable_event_processing: self.enable_event_store, - enable_index_processing: default_enable_index_processing(), - genesis: validator_config.genesis.clone(), - grpc_load_shed: None, - grpc_concurrency_limit: None, - p2p_config, - authority_store_pruning_config: AuthorityStorePruningConfig::fullnode_config(), - end_of_epoch_broadcast_channel_capacity: - default_end_of_epoch_broadcast_channel_capacity(), - checkpoint_executor_config: Default::default(), - metrics: None, - supported_protocol_versions: Some(supported_protocol_versions), - db_checkpoint_config: self.db_checkpoint_config, - indirect_objects_threshold: usize::MAX, - // Copy the expensive safety check config from the first validator config. - expensive_safety_check_config: validator_config.expensive_safety_check_config.clone(), - name_service_package_address: None, - name_service_registry_id: None, - name_service_reverse_registry_id: None, - transaction_deny_config: Default::default(), - certificate_deny_config: Default::default(), - state_debug_dump_config: Default::default(), - }) - } -} - #[cfg(test)] mod test { use std::collections::HashSet; diff --git a/crates/sui-swarm-config/src/node_config_builder.rs b/crates/sui-swarm-config/src/node_config_builder.rs new file mode 100644 index 0000000000000..14d04a8f2890e --- /dev/null +++ b/crates/sui-swarm-config/src/node_config_builder.rs @@ -0,0 +1,301 @@ +// Copyright (c) Mysten Labs, Inc. +// SPDX-License-Identifier: Apache-2.0 + +use crate::genesis_config::{ValidatorGenesisConfig, ValidatorGenesisConfigBuilder}; +use crate::network_config::NetworkConfig; +use fastcrypto::encoding::{Encoding, Hex}; +use fastcrypto::traits::KeyPair; +use narwhal_config::{NetworkAdminServerParameters, PrometheusMetricsParameters}; +use std::net::SocketAddr; +use std::path::PathBuf; +use sui_config::node::{ + default_enable_index_processing, default_end_of_epoch_broadcast_channel_capacity, + AuthorityKeyPairWithPath, AuthorityStorePruningConfig, DBCheckpointConfig, + ExpensiveSafetyCheckConfig, KeyPairWithPath, DEFAULT_GRPC_CONCURRENCY_LIMIT, +}; +use sui_config::p2p::{P2pConfig, SeedPeer}; +use sui_config::{ + local_ip_utils, ConsensusConfig, NodeConfig, AUTHORITIES_DB_NAME, CONSENSUS_DB_NAME, + FULL_NODE_DB_PATH, +}; +use sui_protocol_config::SupportedProtocolVersions; +use sui_types::crypto::{AuthorityKeyPair, AuthorityPublicKeyBytes, SuiKeyPair}; + +/// This builder contains information that's not included in ValidatorGenesisConfig for building +/// a validator NodeConfig. It can be used to build either a genesis validator or a new validator. 
+#[derive(Clone)] +pub struct ValidatorConfigBuilder { + config_directory: PathBuf, + supported_protocol_versions: Option, +} + +impl ValidatorConfigBuilder { + pub fn new(config_directory: PathBuf) -> Self { + Self { + config_directory, + supported_protocol_versions: None, + } + } + + pub fn with_supported_protocol_versions( + mut self, + supported_protocol_versions: SupportedProtocolVersions, + ) -> Self { + assert!(self.supported_protocol_versions.is_none()); + self.supported_protocol_versions = Some(supported_protocol_versions); + self + } + + pub fn build( + self, + validator: ValidatorGenesisConfig, + genesis: sui_config::genesis::Genesis, + ) -> NodeConfig { + let key_path = get_key_path(&validator.key_pair); + let config_directory = self.config_directory; + let db_path = config_directory + .join(AUTHORITIES_DB_NAME) + .join(key_path.clone()); + + let network_address = validator.network_address; + let consensus_address = validator.consensus_address; + let consensus_db_path = config_directory.join(CONSENSUS_DB_NAME).join(key_path); + let internal_worker_address = validator.consensus_internal_worker_address; + let localhost = local_ip_utils::localhost_for_testing(); + let consensus_config = ConsensusConfig { + address: consensus_address, + db_path: consensus_db_path, + internal_worker_address, + max_pending_transactions: None, + max_submit_position: None, + submit_delay_step_override_millis: None, + narwhal_config: narwhal_config::Parameters { + network_admin_server: NetworkAdminServerParameters { + primary_network_admin_server_port: local_ip_utils::get_available_port( + &localhost, + ), + worker_network_admin_server_base_port: local_ip_utils::get_available_port( + &localhost, + ), + }, + prometheus_metrics: PrometheusMetricsParameters { + socket_addr: validator.narwhal_metrics_address, + }, + ..Default::default() + }, + }; + + let p2p_config = P2pConfig { + listen_address: validator.p2p_listen_address.unwrap_or_else(|| { + validator + .p2p_address + .udp_multiaddr_to_listen_address() + .unwrap() + }), + external_address: Some(validator.p2p_address), + ..Default::default() + }; + + NodeConfig { + protocol_key_pair: AuthorityKeyPairWithPath::new(validator.key_pair), + network_key_pair: KeyPairWithPath::new(SuiKeyPair::Ed25519(validator.network_key_pair)), + account_key_pair: KeyPairWithPath::new(validator.account_key_pair), + worker_key_pair: KeyPairWithPath::new(SuiKeyPair::Ed25519(validator.worker_key_pair)), + db_path, + network_address, + metrics_address: validator.metrics_address, + admin_interface_port: local_ip_utils::get_available_port(&localhost), + json_rpc_address: local_ip_utils::new_tcp_address_for_testing(&localhost) + .to_socket_addr() + .unwrap(), + consensus_config: Some(consensus_config), + enable_event_processing: false, + enable_index_processing: default_enable_index_processing(), + genesis: sui_config::node::Genesis::new(genesis), + grpc_load_shed: None, + grpc_concurrency_limit: Some(DEFAULT_GRPC_CONCURRENCY_LIMIT), + p2p_config, + authority_store_pruning_config: AuthorityStorePruningConfig::validator_config(), + end_of_epoch_broadcast_channel_capacity: + default_end_of_epoch_broadcast_channel_capacity(), + checkpoint_executor_config: Default::default(), + metrics: None, + supported_protocol_versions: self.supported_protocol_versions, + db_checkpoint_config: Default::default(), + indirect_objects_threshold: usize::MAX, + expensive_safety_check_config: ExpensiveSafetyCheckConfig::new_enable_all(), + name_service_package_address: None, + name_service_registry_id: 
None, + name_service_reverse_registry_id: None, + transaction_deny_config: Default::default(), + certificate_deny_config: Default::default(), + state_debug_dump_config: Default::default(), + } + } + + pub fn build_new_validator( + self, + rng: &mut R, + network_config: &NetworkConfig, + ) -> NodeConfig { + let validator_config = ValidatorGenesisConfigBuilder::new().build(rng); + self.build(validator_config, network_config.genesis.clone()) + } +} + +#[derive(Clone, Debug, Default)] +pub struct FullnodeConfigBuilder { + config_directory: Option, + // port for json rpc api + rpc_port: Option, + rpc_addr: Option, + supported_protocol_versions: Option, + db_checkpoint_config: Option, + expensive_safety_check_config: Option, +} + +impl FullnodeConfigBuilder { + pub fn new() -> Self { + Self::default() + } + + pub fn with_config_directory(mut self, config_directory: PathBuf) -> Self { + self.config_directory = Some(config_directory); + self + } + + pub fn with_rpc_port(mut self, port: u16) -> Self { + assert!(self.rpc_addr.is_none() && self.rpc_port.is_none()); + self.rpc_port = Some(port); + self + } + + pub fn with_rpc_addr(mut self, addr: SocketAddr) -> Self { + assert!(self.rpc_addr.is_none() && self.rpc_port.is_none()); + self.rpc_addr = Some(addr); + self + } + + pub fn with_supported_protocol_versions(mut self, versions: SupportedProtocolVersions) -> Self { + self.supported_protocol_versions = Some(versions); + self + } + + pub fn with_db_checkpoint_config(mut self, db_checkpoint_config: DBCheckpointConfig) -> Self { + self.db_checkpoint_config = Some(db_checkpoint_config); + self + } + + pub fn with_expensive_safety_check_config( + mut self, + expensive_safety_check_config: ExpensiveSafetyCheckConfig, + ) -> Self { + self.expensive_safety_check_config = Some(expensive_safety_check_config); + self + } + + pub fn build( + self, + rng: &mut R, + network_config: &NetworkConfig, + ) -> NodeConfig { + // Take advantage of ValidatorGenesisConfigBuilder to build the keypairs and addresses, + // even though this is a fullnode. 
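// For illustration, a typical call into this builder looks roughly like the following sketch,
// where `dir` and `network_config` stand for a PathBuf and a NetworkConfig owned by the caller:
//     let config: NodeConfig = FullnodeConfigBuilder::new()
//         .with_config_directory(dir)
//         .with_rpc_port(9000)
//         .build(&mut OsRng, &network_config);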
+ let validator_config = ValidatorGenesisConfigBuilder::new().build(rng); + let ip = validator_config + .network_address + .to_socket_addr() + .unwrap() + .ip() + .to_string(); + + let key_path = get_key_path(&validator_config.key_pair); + let config_directory = self + .config_directory + .unwrap_or_else(|| tempfile::tempdir().unwrap().into_path()); + let db_path = config_directory.join(FULL_NODE_DB_PATH).join(key_path); + + let p2p_config = { + let seed_peers = network_config + .validator_configs + .iter() + .map(|config| SeedPeer { + peer_id: Some(anemo::PeerId( + config.network_key_pair().public().0.to_bytes(), + )), + address: config.p2p_config.external_address.clone().unwrap(), + }) + .collect(); + + P2pConfig { + listen_address: validator_config.p2p_listen_address.unwrap_or_else(|| { + validator_config + .p2p_address + .udp_multiaddr_to_listen_address() + .unwrap() + }), + external_address: Some(validator_config.p2p_address), + seed_peers, + ..Default::default() + } + }; + + let localhost = local_ip_utils::localhost_for_testing(); + let json_rpc_address = self.rpc_addr.unwrap_or_else(|| { + let rpc_port = self + .rpc_port + .unwrap_or_else(|| local_ip_utils::get_available_port(&ip)); + format!("{}:{}", ip, rpc_port).parse().unwrap() + }); + + NodeConfig { + protocol_key_pair: AuthorityKeyPairWithPath::new(validator_config.key_pair), + account_key_pair: KeyPairWithPath::new(validator_config.account_key_pair), + worker_key_pair: KeyPairWithPath::new(SuiKeyPair::Ed25519( + validator_config.worker_key_pair, + )), + network_key_pair: KeyPairWithPath::new(SuiKeyPair::Ed25519( + validator_config.network_key_pair, + )), + + db_path, + network_address: validator_config.network_address, + metrics_address: local_ip_utils::new_local_tcp_socket_for_testing(), + admin_interface_port: local_ip_utils::get_available_port(&localhost), + json_rpc_address, + consensus_config: None, + enable_event_processing: true, // This is unused. + enable_index_processing: default_enable_index_processing(), + genesis: sui_config::node::Genesis::new(network_config.genesis.clone()), + grpc_load_shed: None, + grpc_concurrency_limit: None, + p2p_config, + authority_store_pruning_config: AuthorityStorePruningConfig::fullnode_config(), + end_of_epoch_broadcast_channel_capacity: + default_end_of_epoch_broadcast_channel_capacity(), + checkpoint_executor_config: Default::default(), + metrics: None, + supported_protocol_versions: self.supported_protocol_versions, + db_checkpoint_config: self.db_checkpoint_config.unwrap_or_default(), + indirect_objects_threshold: usize::MAX, + expensive_safety_check_config: self + .expensive_safety_check_config + .unwrap_or_else(ExpensiveSafetyCheckConfig::new_enable_all), + name_service_package_address: None, + name_service_registry_id: None, + name_service_reverse_registry_id: None, + transaction_deny_config: Default::default(), + certificate_deny_config: Default::default(), + state_debug_dump_config: Default::default(), + } + } +} + +/// Given a validator keypair, return a path that can be used to identify the validator. +fn get_key_path(key_pair: &AuthorityKeyPair) -> String { + let public_key: AuthorityPublicKeyBytes = key_pair.public().into(); + let mut key_path = Hex::encode(public_key); + // 12 is rather arbitrary here but it's a nice balance between being short and being unique. 
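// For example, truncating a hex encoding keeps only its leading 12 characters (sketch values):
//     let mut key_path = String::from("a1b2c3d4e5f6a7b8");
//     key_path.truncate(12); // key_path == "a1b2c3d4e5f6"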
+ key_path.truncate(12); + key_path +} diff --git a/crates/sui-swarm/src/memory/container-sim.rs b/crates/sui-swarm/src/memory/container-sim.rs index 229986fca3537..3deb7d793eb1c 100644 --- a/crates/sui-swarm/src/memory/container-sim.rs +++ b/crates/sui-swarm/src/memory/container-sim.rs @@ -42,8 +42,7 @@ impl Container { let handle = sui_simulator::runtime::Handle::current(); let builder = handle.create_node(); - let socket_addr = - mysten_network::multiaddr::to_socket_addr(&config.network_address).unwrap(); + let socket_addr = config.network_address.to_socket_addr().unwrap(); let ip = match socket_addr { SocketAddr::V4(v4) => IpAddr::V4(*v4.ip()), _ => panic!("unsupported protocol"), diff --git a/crates/sui-swarm/src/memory/swarm.rs b/crates/sui-swarm/src/memory/swarm.rs index 179e573f0e4d9..ffc2d50a33c5a 100644 --- a/crates/sui-swarm/src/memory/swarm.rs +++ b/crates/sui-swarm/src/memory/swarm.rs @@ -13,14 +13,15 @@ use std::{ path::{Path, PathBuf}, }; use sui_config::node::DBCheckpointConfig; +use sui_config::NodeConfig; use sui_node::SuiNodeHandle; use sui_protocol_config::{ProtocolVersion, SupportedProtocolVersions}; use sui_swarm_config::genesis_config::{AccountConfig, GenesisConfig, ValidatorGenesisConfig}; use sui_swarm_config::network_config::NetworkConfig; -use sui_swarm_config::network_config_builder::FullnodeConfigBuilder; use sui_swarm_config::network_config_builder::{ CommitteeConfig, ConfigBuilder, ProtocolVersionsConfig, SupportedProtocolVersionsCallback, }; +use sui_swarm_config::node_config_builder::FullnodeConfigBuilder; use sui_types::base_types::AuthorityName; use sui_types::object::Object; use tempfile::TempDir; @@ -31,11 +32,14 @@ pub struct SwarmBuilder { dir: Option, committee: CommitteeConfig, genesis_config: Option, + network_config: Option, additional_objects: Vec, fullnode_count: usize, + fullnode_rpc_port: Option, fullnode_rpc_addr: Option, - with_event_store: bool, supported_protocol_versions_config: ProtocolVersionsConfig, + // Default to supported_protocol_versions_config, but can be overridden. 
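// For example, the fullnode protocol-version config can be overridden independently of the
// validator one (a sketch using builder methods defined later in this file):
//     let swarm = Swarm::builder()
//         .with_fullnode_count(1)
//         .with_fullnode_supported_protocol_versions_config(ProtocolVersionsConfig::Global(versions))
//         .build();
// where `versions` is any SupportedProtocolVersions value.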
+ fullnode_supported_protocol_versions_config: Option, db_checkpoint_config: DBCheckpointConfig, } @@ -47,11 +51,13 @@ impl SwarmBuilder { dir: None, committee: CommitteeConfig::Size(NonZeroUsize::new(1).unwrap()), genesis_config: None, + network_config: None, additional_objects: vec![], fullnode_count: 0, + fullnode_rpc_port: None, fullnode_rpc_addr: None, - with_event_store: false, supported_protocol_versions_config: ProtocolVersionsConfig::Default, + fullnode_supported_protocol_versions_config: None, db_checkpoint_config: DBCheckpointConfig::default(), } } @@ -64,12 +70,15 @@ impl SwarmBuilder { dir: self.dir, committee: self.committee, genesis_config: self.genesis_config, + network_config: self.network_config, additional_objects: self.additional_objects, fullnode_count: self.fullnode_count, + fullnode_rpc_port: self.fullnode_rpc_port, fullnode_rpc_addr: self.fullnode_rpc_addr, - with_event_store: false, - supported_protocol_versions_config: ProtocolVersionsConfig::Default, - db_checkpoint_config: DBCheckpointConfig::default(), + supported_protocol_versions_config: self.supported_protocol_versions_config, + fullnode_supported_protocol_versions_config: self + .fullnode_supported_protocol_versions_config, + db_checkpoint_config: self.db_checkpoint_config, } } @@ -97,11 +106,17 @@ impl SwarmBuilder { } pub fn with_genesis_config(mut self, genesis_config: GenesisConfig) -> Self { - assert!(self.genesis_config.is_none()); + assert!(self.network_config.is_none() && self.genesis_config.is_none()); self.genesis_config = Some(genesis_config); self } + pub fn with_network_config(mut self, network_config: NetworkConfig) -> Self { + assert!(self.network_config.is_none() && self.genesis_config.is_none()); + self.network_config = Some(network_config); + self + } + pub fn with_accounts(mut self, accounts: Vec) -> Self { self.get_or_init_genesis_config().accounts = accounts; self @@ -117,7 +132,14 @@ impl SwarmBuilder { self } + pub fn with_fullnode_rpc_port(mut self, fullnode_rpc_port: u16) -> Self { + assert!(self.fullnode_rpc_addr.is_none()); + self.fullnode_rpc_port = Some(fullnode_rpc_port); + self + } + pub fn with_fullnode_rpc_addr(mut self, fullnode_rpc_addr: SocketAddr) -> Self { + assert!(self.fullnode_rpc_port.is_none()); self.fullnode_rpc_addr = Some(fullnode_rpc_addr); self } @@ -154,6 +176,14 @@ impl SwarmBuilder { self } + pub fn with_fullnode_supported_protocol_versions_config( + mut self, + c: ProtocolVersionsConfig, + ) -> Self { + self.fullnode_supported_protocol_versions_config = Some(c); + self + } + pub fn with_db_checkpoint_config(mut self, db_checkpoint_config: DBCheckpointConfig) -> Self { self.db_checkpoint_config = db_checkpoint_config; self @@ -161,6 +191,7 @@ impl SwarmBuilder { fn get_or_init_genesis_config(&mut self) -> &mut GenesisConfig { if self.genesis_config.is_none() { + assert!(self.network_config.is_none()); self.genesis_config = Some(GenesisConfig::for_local_testing()); } self.genesis_config.as_mut().unwrap() @@ -176,21 +207,22 @@ impl SwarmBuilder { SwarmDirectory::Temporary(TempDir::new().unwrap()) }; - let mut config_builder = ConfigBuilder::new(dir.as_ref()); + let network_config = self.network_config.unwrap_or_else(|| { + let mut config_builder = ConfigBuilder::new(dir.as_ref()); - if let Some(genesis_config) = self.genesis_config { - config_builder = config_builder.with_genesis_config(genesis_config); - } + if let Some(genesis_config) = self.genesis_config { + config_builder = config_builder.with_genesis_config(genesis_config); + } - let network_config = 
config_builder - .committee(self.committee) - .with_swarm() - .rng(self.rng) - .with_objects(self.additional_objects) - .with_supported_protocol_versions_config( - self.supported_protocol_versions_config.clone(), - ) - .build(); + config_builder + .committee(self.committee) + .rng(self.rng) + .with_objects(self.additional_objects) + .with_supported_protocol_versions_config( + self.supported_protocol_versions_config.clone(), + ) + .build() + }); let validators = network_config .validator_configs() @@ -199,21 +231,33 @@ impl SwarmBuilder { .collect(); let mut fullnodes = HashMap::new(); + let mut fullnode_config_builder = FullnodeConfigBuilder::new() + .with_config_directory(dir.as_ref().into()) + .with_db_checkpoint_config(self.db_checkpoint_config.clone()); + if let Some(spvc) = &self.fullnode_supported_protocol_versions_config { + let supported_versions = match spvc { + ProtocolVersionsConfig::Default => SupportedProtocolVersions::SYSTEM_DEFAULT, + ProtocolVersionsConfig::Global(v) => *v, + ProtocolVersionsConfig::PerValidator(func) => func(0, None), + }; + fullnode_config_builder = + fullnode_config_builder.with_supported_protocol_versions(supported_versions); + } if self.fullnode_count > 0 { - (0..self.fullnode_count).for_each(|_| { - let spvc = self.supported_protocol_versions_config.clone(); - //let spvc = spvc.clone(); - let mut config = FullnodeConfigBuilder::new(&network_config) - .with_supported_protocol_versions_config(spvc) - .with_db_checkpoint_config(self.db_checkpoint_config.clone()) - .with_random_dir() - .build() - .unwrap(); - - if let Some(fullnode_rpc_addr) = self.fullnode_rpc_addr { - config.json_rpc_address = fullnode_rpc_addr; + (0..self.fullnode_count).for_each(|idx| { + let mut builder = fullnode_config_builder.clone(); + if idx == 0 { + // Only the first fullnode is used as the rpc fullnode, we can only use the + // same address once. + if let Some(rpc_addr) = self.fullnode_rpc_addr { + builder = builder.with_rpc_addr(rpc_addr); + } + if let Some(rpc_port) = self.fullnode_rpc_port { + builder = builder.with_rpc_port(rpc_port); + } } + let config = builder.build(&mut OsRng, &network_config); fullnodes.insert(config.protocol_public_key(), Node::new(config)); }); } @@ -222,41 +266,7 @@ impl SwarmBuilder { network_config, validators, fullnodes, - } - } - - pub fn with_event_store(mut self) -> Self { - self.with_event_store = true; - self - } - - pub fn from_network_config(self, dir: PathBuf, network_config: NetworkConfig) -> Swarm { - let dir = SwarmDirectory::Persistent(dir); - - let validators = network_config - .validator_configs() - .iter() - .map(|config| (config.protocol_public_key(), Node::new(config.to_owned()))) - .collect(); - - let fullnodes = if let Some(fullnode_rpc_addr) = self.fullnode_rpc_addr { - let mut config = FullnodeConfigBuilder::new(&network_config) - .with_supported_protocol_versions_config(self.supported_protocol_versions_config) - .set_event_store(self.with_event_store) - .with_random_dir() - .build() - .unwrap(); - config.json_rpc_address = fullnode_rpc_addr; - HashMap::from([(config.protocol_public_key(), Node::new(config))]) - } else { - Default::default() - }; - - Swarm { - dir, - network_config, - validators, - fullnodes, + fullnode_config_builder, } } } @@ -268,6 +278,8 @@ pub struct Swarm { network_config: NetworkConfig, validators: HashMap, fullnodes: HashMap, + // Save a copy of the fullnode config builder to build future fullnodes. 
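// For example, an additional fullnode can later be spawned from the saved builder (a sketch
// using the accessors added further below):
//     let config = swarm.get_fullnode_config_builder().build(&mut OsRng, swarm.config());
//     let handle = swarm.spawn_new_fullnode(config).await;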
+ fullnode_config_builder: FullnodeConfigBuilder, } impl Drop for Swarm { @@ -291,7 +303,7 @@ impl Swarm { /// Start all of the Validators associated with this Swarm pub async fn launch(&mut self) -> Result<()> { try_join_all(self.nodes_iter_mut().map(|node| node.start())).await?; - + tracing::info!("Successfully launched Swarm"); Ok(()) } @@ -345,6 +357,19 @@ impl Swarm { pub fn fullnodes(&self) -> impl Iterator { self.fullnodes.values() } + + pub async fn spawn_new_fullnode(&mut self, config: NodeConfig) -> SuiNodeHandle { + let name = config.protocol_public_key(); + let node = Node::new(config); + node.start().await.unwrap(); + let handle = node.get_node_handle().unwrap(); + self.fullnodes.insert(name, node); + handle + } + + pub fn get_fullnode_config_builder(&self) -> FullnodeConfigBuilder { + self.fullnode_config_builder.clone() + } } #[derive(Debug)] diff --git a/crates/sui/src/fire_drill.rs b/crates/sui/src/fire_drill.rs index 3bb0e533fa143..2d555a8e3df24 100644 --- a/crates/sui/src/fire_drill.rs +++ b/crates/sui/src/fire_drill.rs @@ -18,9 +18,8 @@ use fastcrypto::traits::{KeyPair, ToFromBytes}; use move_core_types::ident_str; use shared_crypto::intent::Intent; use std::path::{Path, PathBuf}; -use sui_config::node::KeyPairWithPath; -use sui_config::utils; -use sui_config::{node::AuthorityKeyPairWithPath, Config, NodeConfig, PersistedConfig}; +use sui_config::node::{AuthorityKeyPairWithPath, KeyPairWithPath}; +use sui_config::{local_ip_utils, Config, NodeConfig, PersistedConfig}; use sui_json_rpc_types::{SuiExecutionStatus, SuiTransactionBlockResponseOptions}; use sui_keys::keypair_file::read_keypair_from_file; use sui_sdk::{rpc_types::SuiTransactionBlockEffectsAPI, SuiClient, SuiClientBuilder}; @@ -166,7 +165,8 @@ async fn update_next_epoch_metadata( let http = new_network_address.pop().unwrap(); // pop out tcp new_network_address.pop().unwrap(); - let new_port = utils::get_available_port("127.0.0.1"); + let localhost = local_ip_utils::localhost_for_testing(); + let new_port = local_ip_utils::get_available_port(&localhost); new_network_address.push(Protocol::Tcp(new_port)); new_network_address.push(http); info!("New network address: {:?}", new_network_address); @@ -177,7 +177,7 @@ async fn update_next_epoch_metadata( info!("Current P2P external address: {:?}", new_external_address); // pop out udp new_external_address.pop().unwrap(); - let new_port = utils::get_available_port("127.0.0.1"); + let new_port = local_ip_utils::get_available_port(&localhost); new_external_address.push(Protocol::Udp(new_port)); info!("New P2P external address: {:?}", new_external_address); new_config.p2p_config.external_address = Some(new_external_address.clone()); @@ -194,7 +194,7 @@ async fn update_next_epoch_metadata( info!("Current primary address: {:?}", new_primary_addresses); // pop out udp new_primary_addresses.pop().unwrap(); - let new_port = utils::get_available_port("127.0.0.1"); + let new_port = local_ip_utils::get_available_port(&localhost); new_primary_addresses.push(Protocol::Udp(new_port)); info!("New primary address: {:?}", new_primary_addresses); @@ -211,7 +211,7 @@ async fn update_next_epoch_metadata( info!("Current worker address: {:?}", new_worker_addresses); // pop out udp new_worker_addresses.pop().unwrap(); - let new_port = utils::get_available_port("127.0.0.1"); + let new_port = local_ip_utils::get_available_port(&localhost); new_worker_addresses.push(Protocol::Udp(new_port)); info!("New worker address:: {:?}", new_worker_addresses); diff --git 
a/crates/sui/src/generate_genesis_checkpoint.rs b/crates/sui/src/generate_genesis_checkpoint.rs index 0522d47f18041..38a71c7a1a664 100644 --- a/crates/sui/src/generate_genesis_checkpoint.rs +++ b/crates/sui/src/generate_genesis_checkpoint.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use camino::Utf8PathBuf; -use sui_config::utils; +use sui_config::local_ip_utils; use sui_genesis_builder::validator_info::ValidatorInfo; use sui_genesis_builder::Builder; use sui_types::base_types::SuiAddress; @@ -31,10 +31,10 @@ async fn main() { network_key: network_key.public().clone(), gas_price: sui_config::node::DEFAULT_VALIDATOR_GAS_PRICE, commission_rate: sui_config::node::DEFAULT_COMMISSION_RATE, - network_address: utils::new_tcp_network_address(), - p2p_address: utils::new_udp_network_address(), - narwhal_primary_address: utils::new_udp_network_address(), - narwhal_worker_address: utils::new_udp_network_address(), + network_address: local_ip_utils::new_local_tcp_address_for_testing(), + p2p_address: local_ip_utils::new_local_udp_address_for_testing(), + narwhal_primary_address: local_ip_utils::new_local_udp_address_for_testing(), + narwhal_worker_address: local_ip_utils::new_local_udp_address_for_testing(), description: String::new(), image_url: String::new(), project_url: String::new(), diff --git a/crates/sui/src/genesis_ceremony.rs b/crates/sui/src/genesis_ceremony.rs index 18158588257d7..30c2b3e7da943 100644 --- a/crates/sui/src/genesis_ceremony.rs +++ b/crates/sui/src/genesis_ceremony.rs @@ -262,7 +262,7 @@ fn check_protocol_version(builder: &Builder, protocol_version: ProtocolVersion) mod test { use super::*; use anyhow::Result; - use sui_config::utils; + use sui_config::local_ip_utils; use sui_genesis_builder::validator_info::ValidatorInfo; use sui_keys::keypair_file::{write_authority_keypair_to_file, write_keypair_to_file}; use sui_types::crypto::{get_key_pair_from_rng, AccountKeyPair, AuthorityKeyPair, SuiKeyPair}; @@ -289,10 +289,10 @@ mod test { network_key: network_keypair.public().clone(), gas_price: sui_config::node::DEFAULT_VALIDATOR_GAS_PRICE, commission_rate: sui_config::node::DEFAULT_COMMISSION_RATE, - network_address: utils::new_tcp_network_address(), - p2p_address: utils::new_udp_network_address(), - narwhal_primary_address: utils::new_udp_network_address(), - narwhal_worker_address: utils::new_udp_network_address(), + network_address: local_ip_utils::new_local_tcp_address_for_testing(), + p2p_address: local_ip_utils::new_local_udp_address_for_testing(), + narwhal_primary_address: local_ip_utils::new_local_udp_address_for_testing(), + narwhal_worker_address: local_ip_utils::new_local_udp_address_for_testing(), description: String::new(), image_url: String::new(), project_url: String::new(), diff --git a/crates/sui/src/sui_commands.rs b/crates/sui/src/sui_commands.rs index 1b82ba8ab13e9..15581dddf6180 100644 --- a/crates/sui/src/sui_commands.rs +++ b/crates/sui/src/sui_commands.rs @@ -11,6 +11,7 @@ use anyhow::{anyhow, bail}; use clap::*; use fastcrypto::traits::KeyPair; use move_package::BuildConfig; +use rand::rngs::OsRng; use std::io::{stderr, stdout, Write}; use std::num::NonZeroUsize; use std::path::{Path, PathBuf}; @@ -31,7 +32,7 @@ use sui_swarm::memory::Swarm; use sui_swarm_config::genesis_config::{GenesisConfig, DEFAULT_NUMBER_OF_AUTHORITIES}; use sui_swarm_config::network_config::NetworkConfig; use sui_swarm_config::network_config_builder::ConfigBuilder; -use sui_swarm_config::network_config_builder::FullnodeConfigBuilder; +use 
sui_swarm_config::node_config_builder::FullnodeConfigBuilder; use sui_types::crypto::{SignatureScheme, SuiKeyPair}; use tracing::info; @@ -171,16 +172,17 @@ impl SuiCommand { network_config_path )) })?; - - let mut swarm = if no_full_node { - Swarm::builder() + let mut swarm_builder = Swarm::builder() + .dir(sui_config_dir()?) + .with_network_config(network_config); + if no_full_node { + swarm_builder = swarm_builder.with_fullnode_count(0); } else { - Swarm::builder() - .with_fullnode_rpc_addr(sui_config::node::default_json_rpc_address()) - .with_event_store() + swarm_builder = swarm_builder + .with_fullnode_count(1) + .with_fullnode_rpc_addr(sui_config::node::default_json_rpc_address()); } - .from_network_config(sui_config_dir()?, network_config); - + let mut swarm = swarm_builder.build(); swarm.launch().await?; let mut interval = tokio::time::interval(std::time::Duration::from_secs(3)); @@ -439,12 +441,11 @@ async fn genesis( info!("Client keystore is stored in {:?}.", keystore_path); - let mut fullnode_config = FullnodeConfigBuilder::new(&network_config) - .with_event_store() - .with_dir(FULL_NODE_DB_PATH.into()) - .build()?; + let fullnode_config = FullnodeConfigBuilder::new() + .with_config_directory(FULL_NODE_DB_PATH.into()) + .with_rpc_addr(sui_config::node::default_json_rpc_address()) + .build(&mut OsRng, &network_config); - fullnode_config.json_rpc_address = sui_config::node::default_json_rpc_address(); fullnode_config.save(sui_config_dir.join(SUI_FULLNODE_CONFIG))?; for (i, validator) in network_config diff --git a/crates/sui/src/unit_tests/cli_tests.rs b/crates/sui/src/unit_tests/cli_tests.rs index 70a1b7ad9e9e7..96cc28f0b7675 100644 --- a/crates/sui/src/unit_tests/cli_tests.rs +++ b/crates/sui/src/unit_tests/cli_tests.rs @@ -161,7 +161,7 @@ async fn test_genesis_for_benchmarks() -> Result<(), anyhow::Error> { for (i, expected_ip) in benchmark_ips.into_iter().enumerate() { let config_path = &working_dir.join(sui_config::validator_config_file(i)); let config = NodeConfig::load(config_path)?; - let socket_address = sui_types::multiaddr::to_socket_addr(&config.network_address).unwrap(); + let socket_address = config.network_address.to_socket_addr().unwrap(); assert_eq!(expected_ip, socket_address.ip().to_string()); } diff --git a/crates/sui/tests/full_node_tests.rs b/crates/sui/tests/full_node_tests.rs index 48f1dac773af0..ef9183b84261b 100644 --- a/crates/sui/tests/full_node_tests.rs +++ b/crates/sui/tests/full_node_tests.rs @@ -11,6 +11,7 @@ use move_core_types::parser::parse_struct_tag; use move_core_types::value::MoveStructLayout; use mysten_metrics::RegistryService; use prometheus::Registry; +use rand::rngs::OsRng; use serde_json::json; use sui::client_commands::{SuiClientCommandResult, SuiClientCommands}; use sui_json_rpc_types::{ @@ -20,7 +21,7 @@ use sui_json_rpc_types::{ use sui_json_rpc_types::{EventFilter, TransactionFilter}; use sui_keys::keystore::AccountKeystore; use sui_macros::*; -use sui_node::SuiNode; +use sui_node::{SuiNode, SuiNodeHandle}; use sui_sdk::wallet_context::WalletContext; use sui_test_transaction_builder::TestTransactionBuilder; use sui_tool::restore_from_db_checkpoint; @@ -45,7 +46,7 @@ use sui_types::utils::{ }; use sui_types::SUI_CLOCK_OBJECT_ID; use test_utils::authority::test_and_configure_authority_configs; -use test_utils::network::{start_fullnode_from_config, TestClusterBuilder}; +use test_utils::network::TestClusterBuilder; use test_utils::transaction::{wait_for_all_txes, wait_for_tx}; use tokio::sync::Mutex; use tokio::time::timeout; @@ 
-55,7 +56,7 @@ use tracing::info; #[sim_test] async fn test_full_node_follows_txes() -> Result<(), anyhow::Error> { let mut test_cluster = TestClusterBuilder::new().build().await?; - let node = test_cluster.start_fullnode().await?.sui_node; + let fullnode = test_cluster.start_fullnode().await.sui_node; let context = &mut test_cluster.wallet; @@ -65,19 +66,19 @@ async fn test_full_node_follows_txes() -> Result<(), anyhow::Error> { let (transferred_object, _, receiver, digest, _) = transfer_coin(context).await?; - wait_for_tx(digest, node.state().clone()).await; + wait_for_tx(digest, fullnode.state()).await; // A small delay is needed for post processing operations following the transaction to finish. sleep(Duration::from_secs(1)).await; // verify that the node has seen the transfer - let object_read = node.state().get_object_read(&transferred_object)?; + let object_read = fullnode.state().get_object_read(&transferred_object)?; let object = object_read.into_object()?; assert_eq!(object.owner.get_owner_address().unwrap(), receiver); // timestamp is recorded - let ts = node.state().get_timestamp_ms(&digest).await?; + let ts = fullnode.state().get_timestamp_ms(&digest).await?; assert!(ts.is_some()); Ok(()) @@ -86,7 +87,7 @@ async fn test_full_node_follows_txes() -> Result<(), anyhow::Error> { #[sim_test] async fn test_full_node_shared_objects() -> Result<(), anyhow::Error> { let mut test_cluster = TestClusterBuilder::new().build().await?; - let node = test_cluster.start_fullnode().await?.sui_node; + let handle = test_cluster.start_fullnode().await; let context = &mut test_cluster.wallet; @@ -97,7 +98,7 @@ async fn test_full_node_shared_objects() -> Result<(), anyhow::Error> { .increment_counter(sender, None, package_ref.0, counter_ref.0, counter_ref.1) .await; let digest = response.digest; - wait_for_tx(digest, node.state().clone()).await; + wait_for_tx(digest, handle.sui_node.state()).await; Ok(()) } @@ -170,7 +171,7 @@ async fn test_full_node_move_function_index() -> Result<(), anyhow::Error> { .await; let digest = response.digest; - wait_for_tx(digest, node.state().clone()).await; + wait_for_tx(digest, node.state()).await; let txes = node.state().get_transactions( Some(TransactionFilter::MoveFunction { package: package_ref.0, @@ -442,11 +443,11 @@ async fn test_full_node_cold_sync() -> Result<(), anyhow::Error> { sleep(Duration::from_millis(1000)).await; // Start a new fullnode that is not on the write path - let node = test_cluster.start_fullnode().await.unwrap().sui_node; + let fullnode = test_cluster.start_fullnode().await.sui_node; - wait_for_tx(digest, node.state().clone()).await; + wait_for_tx(digest, fullnode.state()).await; - let info = node + let info = fullnode .state() .handle_transaction_info_request(TransactionInfoRequest { transaction_digest: digest, @@ -460,10 +461,10 @@ async fn test_full_node_cold_sync() -> Result<(), anyhow::Error> { #[sim_test] async fn test_full_node_sync_flood() -> Result<(), anyhow::Error> { - let test_cluster = TestClusterBuilder::new().build().await?; + let mut test_cluster = TestClusterBuilder::new().build().await?; // Start a new fullnode that is not on the write path - let node = test_cluster.start_fullnode().await.unwrap().sui_node; + let fullnode = test_cluster.start_fullnode().await.sui_node; let context = test_cluster.wallet; @@ -543,7 +544,7 @@ async fn test_full_node_sync_flood() -> Result<(), anyhow::Error> { .map(|r| r.clone().unwrap()) .flat_map(|(a, b)| std::iter::once(a).chain(std::iter::once(b))) .collect(); - wait_for_all_txes(digests, 
node.state().clone()).await; + wait_for_all_txes(digests, fullnode.state()).await; Ok(()) } @@ -556,15 +557,7 @@ async fn test_full_node_sub_and_query_move_event_ok() -> Result<(), anyhow::Erro .await?; // Start a new fullnode that is not on the write path - let fullnode = start_fullnode_from_config( - test_cluster - .fullnode_config_builder() - .with_event_store() - .build() - .unwrap(), - ) - .await - .unwrap(); + let fullnode = test_cluster.start_fullnode().await; let node = fullnode.sui_node; let ws_client = fullnode.ws_client; @@ -657,7 +650,7 @@ async fn test_full_node_sub_and_query_move_event_ok() -> Result<(), anyhow::Erro #[sim_test] async fn test_full_node_event_read_api_ok() { let mut test_cluster = TestClusterBuilder::new() - .set_fullnode_rpc_port(50000) + .with_fullnode_rpc_port(50000) .enable_fullnode_events() .build() .await @@ -753,15 +746,17 @@ async fn test_full_node_event_query_by_module_ok() { #[sim_test] async fn test_full_node_transaction_orchestrator_basic() -> Result<(), anyhow::Error> { let mut test_cluster = TestClusterBuilder::new().build().await?; - let node = test_cluster.start_fullnode().await?.sui_node; + let fullnode = test_cluster.start_fullnode().await.sui_node; let context = &mut test_cluster.wallet; - let transaction_orchestrator = node - .transaction_orchestrator() - .expect("Fullnode should have transaction orchestrator toggled on."); - let mut rx = node - .subscribe_to_transaction_orchestrator_effects() - .expect("Fullnode should have transaction orchestrator toggled on."); + let transaction_orchestrator = fullnode.with(|node| { + node.transaction_orchestrator() + .expect("Fullnode should have transaction orchestrator toggled on.") + }); + let mut rx = fullnode.with(|node| { + node.subscribe_to_transaction_orchestrator_effects() + .expect("Fullnode should have transaction orchestrator toggled on.") + }); let txn_count = 4; let mut txns = context.batch_make_transfer_transactions(txn_count).await; @@ -797,7 +792,7 @@ async fn test_full_node_transaction_orchestrator_basic() -> Result<(), anyhow::E assert!(is_executed_locally); assert_eq!(events.digest(), txn_events.digest()); // verify that the node has sequenced and executed the txn - node.state().get_executed_transaction_and_effects(digest).await + fullnode.state().get_executed_transaction_and_effects(digest).await .unwrap_or_else(|e| panic!("Fullnode does not know about the txn {:?} that was executed with WaitForLocalExecution: {:?}", digest, e)); // Test WaitForEffectsCert @@ -825,8 +820,8 @@ async fn test_full_node_transaction_orchestrator_basic() -> Result<(), anyhow::E assert_eq!(cte.effects.digest(), *certified_txn_effects.digest()); assert_eq!(txn_events.digest(), events.digest()); assert!(!is_executed_locally); - wait_for_tx(digest, node.state().clone()).await; - node.state().get_executed_transaction_and_effects(digest).await + wait_for_tx(digest, fullnode.state()).await; + fullnode.state().get_executed_transaction_and_effects(digest).await .unwrap_or_else(|e| panic!("Fullnode does not know about the txn {:?} that was executed with WaitForEffectsCert: {:?}", digest, e)); Ok(()) @@ -957,7 +952,7 @@ async fn test_full_node_transaction_orchestrator_rpc_ok() -> Result<(), anyhow:: } async fn get_obj_read_from_node( - node: &SuiNode, + node: &SuiNodeHandle, object_id: ObjectID, ) -> Result<(ObjectRef, Object, Option), anyhow::Error> { if let ObjectRead::Exists(obj_ref, object, layout) = node.state().get_object_read(&object_id)? 
{ @@ -968,7 +963,7 @@ async fn get_obj_read_from_node( } async fn get_past_obj_read_from_node( - node: &SuiNode, + node: &SuiNodeHandle, object_id: ObjectID, seq_num: SequenceNumber, ) -> Result<(ObjectRef, Object, Option), anyhow::Error> { @@ -986,7 +981,7 @@ async fn test_get_objects_read() -> Result<(), anyhow::Error> { telemetry_subscribers::init_for_testing(); let mut test_cluster = TestClusterBuilder::new().build().await?; let rgp = test_cluster.get_reference_gas_price().await; - let node = test_cluster.fullnode_handle.sui_node.clone(); + let node = &test_cluster.fullnode_handle.sui_node; let context = &mut test_cluster.wallet; let package_id = context.publish_nfts_package().await.0; @@ -997,7 +992,7 @@ async fn test_get_objects_read() -> Result<(), anyhow::Error> { let recipient = context.config.keystore.addresses().get(1).cloned().unwrap(); assert_ne!(sender, recipient); - let (object_ref_v1, object_v1, _) = get_obj_read_from_node(&node, object_id).await?; + let (object_ref_v1, object_v1, _) = get_obj_read_from_node(node, object_id).await?; // Transfer the object from sender to recipient let gas_ref = context @@ -1016,7 +1011,7 @@ async fn test_get_objects_read() -> Result<(), anyhow::Error> { .unwrap(); sleep(Duration::from_secs(1)).await; - let (object_ref_v2, object_v2, _) = get_obj_read_from_node(&node, object_id).await?; + let (object_ref_v2, object_v2, _) = get_obj_read_from_node(node, object_id).await?; assert_ne!(object_ref_v2, object_ref_v1); // Transfer some SUI to recipient @@ -1050,13 +1045,13 @@ async fn test_get_objects_read() -> Result<(), anyhow::Error> { assert_eq!(object_ref_v3, read_ref_v3); let (read_ref_v2, read_obj_v2, _) = - get_past_obj_read_from_node(&node, object_id, object_ref_v2.1).await?; + get_past_obj_read_from_node(node, object_id, object_ref_v2.1).await?; assert_eq!(read_ref_v2, object_ref_v2); assert_eq!(read_obj_v2, object_v2); assert_eq!(read_obj_v2.owner, Owner::AddressOwner(recipient)); let (read_ref_v1, read_obj_v1, _) = - get_past_obj_read_from_node(&node, object_id, object_ref_v1.1).await?; + get_past_obj_read_from_node(node, object_id, object_ref_v1.1).await?; assert_eq!(read_ref_v1, object_ref_v1); assert_eq!(read_obj_v1, object_v1); assert_eq!(read_obj_v1.owner, Owner::AddressOwner(sender)); @@ -1094,14 +1089,17 @@ async fn test_full_node_bootstrap_from_snapshot() -> Result<(), anyhow::Error> { .with_enable_db_checkpoints_fullnodes() .build() .await?; - let checkpoint_path = test_cluster.fullnode_handle.sui_node.db_checkpoint_path(); - let config = test_cluster.fullnode_config_builder().build()?; + let checkpoint_path = test_cluster + .fullnode_handle + .sui_node + .with(|node| node.db_checkpoint_path()); + let config = test_cluster + .fullnode_config_builder() + .build(&mut OsRng, test_cluster.swarm.config()); let epoch_0_db_path = config.db_path().join("store").join("epoch_0"); - let context = &mut test_cluster.wallet; - let _fullnode = test_cluster.fullnode_handle.sui_node.clone(); - let _ = transfer_coin(context).await?; - let _ = transfer_coin(context).await?; - let (_transferred_object, _, _, digest, ..) = transfer_coin(context).await?; + let _ = transfer_coin(&test_cluster.wallet).await?; + let _ = transfer_coin(&test_cluster.wallet).await?; + let (_transferred_object, _, _, digest, ..) 
= transfer_coin(&test_cluster.wallet).await?; // Skip the first epoch change from epoch 0 to epoch 1, but wait for the second // epoch change from epoch 1 to epoch 2 at which point during reconfiguration we will take @@ -1115,13 +1113,16 @@ async fn test_full_node_bootstrap_from_snapshot() -> Result<(), anyhow::Error> { // Spin up a new full node restored from the snapshot taken at the end of epoch 1 restore_from_db_checkpoint(&config, &checkpoint_path.join("epoch_1")).await?; - let node = start_fullnode_from_config(config).await?.sui_node; + let node = test_cluster + .start_fullnode_from_config(config) + .await + .sui_node; wait_for_tx(digest, node.state().clone()).await; loop { // Ensure this full node is able to transition to the next epoch - if node.current_epoch_for_testing() >= 2 { + if node.with(|node| node.current_epoch_for_testing()) >= 2 { break; } sleep(Duration::from_millis(500)).await; @@ -1131,7 +1132,8 @@ async fn test_full_node_bootstrap_from_snapshot() -> Result<(), anyhow::Error> { // doesn't exist assert!(!epoch_0_db_path.exists()); - let (_transferred_object, _, _, digest_after_restore, ..) = transfer_coin(context).await?; + let (_transferred_object, _, _, digest_after_restore, ..) = + transfer_coin(&test_cluster.wallet).await?; wait_for_tx(digest_after_restore, node.state().clone()).await; Ok(()) } @@ -1140,7 +1142,7 @@ async fn test_full_node_bootstrap_from_snapshot() -> Result<(), anyhow::Error> { async fn test_pass_back_clock_object() -> Result<(), anyhow::Error> { let mut test_cluster = TestClusterBuilder::new().build().await?; let rgp = test_cluster.get_reference_gas_price().await; - let node = test_cluster.start_fullnode().await?.sui_node; + let fullnode = test_cluster.start_fullnode().await.sui_node; let context = &mut test_cluster.wallet; @@ -1155,12 +1157,14 @@ async fn test_pass_back_clock_object() -> Result<(), anyhow::Error> { .unwrap() .unwrap(); - let transaction_orchestrator = node - .transaction_orchestrator() - .expect("Fullnode should have transaction orchestrator toggled on."); - let mut rx = node - .subscribe_to_transaction_orchestrator_effects() - .expect("Fullnode should have transaction orchestrator toggled on."); + let transaction_orchestrator = fullnode.with(|node| { + node.transaction_orchestrator() + .expect("Fullnode should have transaction orchestrator toggled on.") + }); + let mut rx = fullnode.with(|node| { + node.subscribe_to_transaction_orchestrator_effects() + .expect("Fullnode should have transaction orchestrator toggled on.") + }); let tx_data = TransactionData::new_move_call( sender, @@ -1200,7 +1204,7 @@ async fn test_pass_back_clock_object() -> Result<(), anyhow::Error> { } async fn transfer_coin( - context: &mut WalletContext, + context: &WalletContext, ) -> Result< ( ObjectID, diff --git a/crates/sui/tests/onsite_reconfig_observer_tests.rs b/crates/sui/tests/onsite_reconfig_observer_tests.rs index e5a0bf131ffc9..ad3595e2eda55 100644 --- a/crates/sui/tests/onsite_reconfig_observer_tests.rs +++ b/crates/sui/tests/onsite_reconfig_observer_tests.rs @@ -22,17 +22,18 @@ async fn test_onsite_reconfig_observer_basic() { let fullnode = &test_cluster.fullnode_handle.sui_node; - let qd = fullnode - .transaction_orchestrator() - .unwrap() - .clone_quorum_driver(); + let qd = fullnode.with(|node| { + node.transaction_orchestrator() + .unwrap() + .clone_quorum_driver() + }); assert_eq!(qd.current_epoch(), 0); - let rx = fullnode.subscribe_to_epoch_change(); + let rx = fullnode.with(|node| node.subscribe_to_epoch_change()); let registry = 
Registry::new(); let mut observer = OnsiteReconfigObserver::new( rx, - fullnode.clone_authority_store(), - fullnode.clone_committee_store(), + fullnode.with(|node| node.clone_authority_store()), + fullnode.with(|node| node.clone_committee_store()), SafeClientMetricsBase::new(®istry), AuthAggMetrics::new(®istry), ); @@ -45,17 +46,14 @@ async fn test_onsite_reconfig_observer_basic() { // Give it some time for the update to happen tokio::time::sleep(tokio::time::Duration::from_secs(3)).await; - let qd = fullnode - .transaction_orchestrator() - .unwrap() - .clone_quorum_driver(); + let qd = fullnode.with(|node| { + node.transaction_orchestrator() + .unwrap() + .clone_quorum_driver() + }); assert_eq!(qd.current_epoch(), 1); assert_eq!( - fullnode - .clone_authority_aggregator() - .unwrap() - .committee - .epoch, + fullnode.with(|node| node.clone_authority_aggregator().unwrap().committee.epoch), 1 ); } diff --git a/crates/sui/tests/protocol_version_tests.rs b/crates/sui/tests/protocol_version_tests.rs index 3433978d84029..334f71f84a7f9 100644 --- a/crates/sui/tests/protocol_version_tests.rs +++ b/crates/sui/tests/protocol_version_tests.rs @@ -575,14 +575,18 @@ mod sim_only_tests { }); let test_cluster = TestClusterBuilder::new() - .with_epoch_duration_ms(20000) .with_supported_protocol_versions(SupportedProtocolVersions::new_for_testing( START, FINISH, )) + .with_epoch_duration_ms(40000) .build() .await .unwrap(); + // We must stop the validators before overriding the system modules, otherwise the validators + // may start running before the override and hence send capabilities indicating that they + // only support the genesis system modules. + test_cluster.stop_all_validators().await; let first = test_cluster.swarm.validators().next().unwrap(); let first_name = first.name(); override_sui_system_modules_cb(Box::new(move |name| { @@ -593,6 +597,7 @@ mod sim_only_tests { Some(sui_system_modules("compatible")) } })); + test_cluster.start_all_validator().await; expect_upgrade_succeeded(&test_cluster).await; @@ -623,7 +628,7 @@ mod sim_only_tests { }); let test_cluster = TestClusterBuilder::new() - .with_epoch_duration_ms(20000) + .with_epoch_duration_ms(40000) .with_supported_protocol_versions(SupportedProtocolVersions::new_for_testing( START, FINISH, )) @@ -631,6 +636,7 @@ mod sim_only_tests { .await .unwrap(); + test_cluster.stop_all_validators().await; let mut validators = test_cluster.swarm.validators(); let first = validators.next().unwrap().name(); let second = validators.next().unwrap().name(); @@ -641,6 +647,7 @@ mod sim_only_tests { None } })); + test_cluster.start_all_validator().await; expect_upgrade_failed(&test_cluster).await; } @@ -711,6 +718,8 @@ mod sim_only_tests { #[sim_test] async fn sui_system_state_shallow_upgrade_test() { + override_sui_system_modules("mock_sui_systems/shallow_upgrade"); + let test_cluster = TestClusterBuilder::new() .with_epoch_duration_ms(20000) .with_supported_protocol_versions(SupportedProtocolVersions::new_for_testing( @@ -720,7 +729,6 @@ mod sim_only_tests { .build() .await .unwrap(); - override_sui_system_modules("mock_sui_systems/shallow_upgrade"); // Wait for the upgrade to finish. After the upgrade, the new framework will be installed, // but the system state object hasn't been upgraded yet. 
let system_state = test_cluster.wait_for_epoch(Some(1)).await; @@ -743,6 +751,8 @@ mod sim_only_tests { #[sim_test] async fn sui_system_state_deep_upgrade_test() { + override_sui_system_modules("mock_sui_systems/deep_upgrade"); + let test_cluster = TestClusterBuilder::new() .with_epoch_duration_ms(20000) .with_supported_protocol_versions(SupportedProtocolVersions::new_for_testing( @@ -752,7 +762,6 @@ mod sim_only_tests { .build() .await .unwrap(); - override_sui_system_modules("mock_sui_systems/deep_upgrade"); // Wait for the upgrade to finish. After the upgrade, the new framework will be installed, // but the system state object hasn't been upgraded yet. let system_state = test_cluster.wait_for_epoch(Some(1)).await; diff --git a/crates/sui/tests/reconfiguration_tests.rs b/crates/sui/tests/reconfiguration_tests.rs index 50e98f7d22b94..90b644bc7071d 100644 --- a/crates/sui/tests/reconfiguration_tests.rs +++ b/crates/sui/tests/reconfiguration_tests.rs @@ -169,8 +169,7 @@ async fn reconfig_with_revert_end_to_end_test() { let net = test_cluster .fullnode_handle .sui_node - .clone_authority_aggregator() - .unwrap(); + .with(|node| node.clone_authority_aggregator().unwrap()); let cert = net .process_transaction(tx.clone()) .await @@ -438,8 +437,7 @@ async fn test_validator_resign_effects() { let net = test_cluster .fullnode_handle .sui_node - .clone_authority_aggregator() - .unwrap(); + .with(|node| node.clone_authority_aggregator().unwrap()); let effects1 = net .process_transaction(tx) .await diff --git a/crates/sui/tests/simulator_tests.rs b/crates/sui/tests/simulator_tests.rs index 06169636f7bdc..890ce862dc67d 100644 --- a/crates/sui/tests/simulator_tests.rs +++ b/crates/sui/tests/simulator_tests.rs @@ -132,7 +132,7 @@ async fn test_net_determinism() { sleep(Duration::from_millis(1000)).await; - let node = test_cluster.start_fullnode().await.unwrap(); + let handle = test_cluster.start_fullnode().await; - wait_for_tx(digest, node.sui_node.state().clone()).await; + wait_for_tx(digest, handle.sui_node.state()).await; } diff --git a/crates/sui/tests/transaction_orchestrator_tests.rs b/crates/sui/tests/transaction_orchestrator_tests.rs index f42b25765e631..fb3af786c8ded 100644 --- a/crates/sui/tests/transaction_orchestrator_tests.rs +++ b/crates/sui/tests/transaction_orchestrator_tests.rs @@ -18,18 +18,22 @@ use tracing::info; async fn test_blocking_execution() -> Result<(), anyhow::Error> { let mut test_cluster = TestClusterBuilder::new().build().await?; let context = &mut test_cluster.wallet; - let node = &test_cluster.fullnode_handle.sui_node; + let handle = &test_cluster.fullnode_handle.sui_node; let temp_dir = tempfile::tempdir().unwrap(); - let reconfig_channel = node.subscribe_to_epoch_change(); - let orchestrator = TransactiondOrchestrator::new_with_network_clients( - node.state(), - reconfig_channel, - temp_dir.path(), - &Registry::new(), - ) - .await - .unwrap(); + let registry = Registry::new(); + // Start orchestrator inside container so that it will be properly shutdown. 
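// The handle-scoped construction below mirrors the synchronous `with` accessor used elsewhere
// in these tests, e.g. (sketch):
//     let epoch_rx = handle.with(|node| node.subscribe_to_epoch_change());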
+ let orchestrator = handle + .with_async(|node| { + TransactiondOrchestrator::new_with_network_clients( + node.state(), + node.subscribe_to_epoch_change(), + temp_dir.path(), + ®istry, + ) + }) + .await + .unwrap(); let txn_count = 4; let mut txns = context.batch_make_transfer_transactions(txn_count).await; @@ -48,7 +52,7 @@ async fn test_blocking_execution() -> Result<(), anyhow::Error> { .await?; // Wait for data sync to catch up - wait_for_tx(digest, node.state().clone()).await; + wait_for_tx(digest, handle.state().clone()).await; // Transaction Orchestrator proactivcely executes txn locally let txn = txns.swap_remove(0); @@ -66,7 +70,7 @@ async fn test_blocking_execution() -> Result<(), anyhow::Error> { let (_, _, executed_locally) = *result; assert!(executed_locally); - assert!(node + assert!(handle .state() .get_executed_transaction_and_effects(digest) .await @@ -83,19 +87,23 @@ async fn test_fullnode_wal_log() -> Result<(), anyhow::Error> { .build() .await?; - let node = &test_cluster.fullnode_handle.sui_node; + let handle = &test_cluster.fullnode_handle.sui_node; let temp_dir = tempfile::tempdir().unwrap(); - let reconfig_channel = node.subscribe_to_epoch_change(); tokio::task::yield_now().await; - let orchestrator = TransactiondOrchestrator::new_with_network_clients( - node.state(), - reconfig_channel, - temp_dir.path(), - &Registry::new(), - ) - .await - .unwrap(); + let registry = Registry::new(); + // Start orchestrator inside container so that it will be properly shutdown. + let orchestrator = handle + .with_async(|node| { + TransactiondOrchestrator::new_with_network_clients( + node.state(), + node.subscribe_to_epoch_change(), + temp_dir.path(), + ®istry, + ) + }) + .await + .unwrap(); let txn_count = 2; let context = &mut test_cluster.wallet; @@ -163,34 +171,30 @@ async fn test_fullnode_wal_log() -> Result<(), anyhow::Error> { async fn test_transaction_orchestrator_reconfig() { telemetry_subscribers::init_for_testing(); let test_cluster = TestClusterBuilder::new().build().await.unwrap(); - let epoch = test_cluster - .fullnode_handle - .sui_node - .transaction_orchestrator() - .unwrap() - .quorum_driver() - .current_epoch(); + let epoch = test_cluster.fullnode_handle.sui_node.with(|node| { + node.transaction_orchestrator() + .unwrap() + .quorum_driver() + .current_epoch() + }); assert_eq!(epoch, 0); test_cluster.trigger_reconfiguration().await; - let epoch = test_cluster - .fullnode_handle - .sui_node - .transaction_orchestrator() - .unwrap() - .quorum_driver() - .current_epoch(); + let epoch = test_cluster.fullnode_handle.sui_node.with(|node| { + node.transaction_orchestrator() + .unwrap() + .quorum_driver() + .current_epoch() + }); assert_eq!(epoch, 1); assert_eq!( - test_cluster - .fullnode_handle - .sui_node + test_cluster.fullnode_handle.sui_node.with(|node| node .clone_authority_aggregator() .unwrap() .committee - .epoch, + .epoch), 1 ); } @@ -221,8 +225,7 @@ async fn test_tx_across_epoch_boundaries() { let to = test_cluster .fullnode_handle .sui_node - .transaction_orchestrator() - .unwrap(); + .with(|node| node.transaction_orchestrator().unwrap()); let tx_digest = *tx.digest(); info!(?tx_digest, "Submitting tx"); diff --git a/crates/test-utils/Cargo.toml b/crates/test-utils/Cargo.toml index 98d476f3d81a7..4ae63d55d4c72 100644 --- a/crates/test-utils/Cargo.toml +++ b/crates/test-utils/Cargo.toml @@ -35,8 +35,6 @@ sui-test-transaction-builder = { path = "../sui-test-transaction-builder" } mysten-metrics = { path = "../mysten-metrics"} shared-crypto = { path = 
"../shared-crypto" } -mysten-network.workspace = true - move-binary-format.workspace = true move-core-types.workspace = true move-package.workspace = true diff --git a/crates/test-utils/src/authority.rs b/crates/test-utils/src/authority.rs index 15acfeca25f72..eece6517945b5 100644 --- a/crates/test-utils/src/authority.rs +++ b/crates/test-utils/src/authority.rs @@ -4,7 +4,6 @@ use mysten_metrics::RegistryService; use prometheus::Registry; use rand::{prelude::StdRng, SeedableRng}; -use std::net::IpAddr; use std::num::NonZeroUsize; use std::time::Duration; use sui_config::NodeConfig; @@ -12,7 +11,7 @@ use sui_core::authority_client::AuthorityAPI; use sui_core::authority_client::NetworkAuthorityClient; pub use sui_node::{SuiNode, SuiNodeHandle}; use sui_swarm_config::network_config::NetworkConfig; -use sui_swarm_config::network_config_builder::{ConfigBuilder, FullnodeConfigBuilder}; +use sui_swarm_config::network_config_builder::ConfigBuilder; use sui_types::base_types::ObjectID; use sui_types::messages_grpc::ObjectInfoRequest; use sui_types::multiaddr::Multiaddr; @@ -104,10 +103,10 @@ pub async fn start_node(config: &NodeConfig, registry_service: RegistryService) /// most of the time. #[cfg(msim)] pub async fn start_node(config: &NodeConfig, registry_service: RegistryService) -> SuiNodeHandle { - use std::net::SocketAddr; + use std::net::{IpAddr, SocketAddr}; let config = config.clone(); - let socket_addr = mysten_network::multiaddr::to_socket_addr(&config.network_address).unwrap(); + let socket_addr = config.network_address.to_socket_addr().unwrap(); let ip = match socket_addr { SocketAddr::V4(v4) => IpAddr::V4(*v4.ip()), _ => panic!("unsupported protocol"), @@ -145,29 +144,6 @@ pub async fn spawn_test_authorities(config: &NetworkConfig) -> Vec) -> SuiNodeHandle { - let registry_service = RegistryService::new(Registry::new()); - - let mut builder = FullnodeConfigBuilder::new(config); - - if cfg!(msim) { - let ip_addr: IpAddr = "11.10.0.0".to_string().parse().unwrap(); - builder = builder - .with_listen_ip(ip_addr) - .with_port(8080) - .with_p2p_port(8084) - .with_rpc_port(rpc_port.unwrap_or(9000)) - .with_admin_port(8888); - } else { - builder = builder.set_rpc_port(rpc_port); - } - - let fullnode_config = builder.build().unwrap(); - start_node(&fullnode_config, registry_service).await -} - /// Get a network client to communicate with the consensus. 
 pub fn get_client(net_address: &Multiaddr) -> NetworkAuthorityClient {
     NetworkAuthorityClient::connect_lazy(net_address).unwrap()
diff --git a/crates/test-utils/src/network.rs b/crates/test-utils/src/network.rs
index afac687249ecd..26ae42becf9a5 100644
--- a/crates/test-utils/src/network.rs
+++ b/crates/test-utils/src/network.rs
@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 
 use futures::future::join_all;
+use std::net::SocketAddr;
 use std::num::NonZeroUsize;
 use std::sync::Arc;
 use std::time::Duration;
@@ -9,19 +10,12 @@ use std::time::Duration;
 use jsonrpsee::http_client::{HttpClient, HttpClientBuilder};
 use jsonrpsee::ws_client::WsClient;
 use jsonrpsee::ws_client::WsClientBuilder;
-use prometheus::Registry;
 use rand::{distributions::*, rngs::OsRng, seq::SliceRandom};
-use tokio::time::{timeout, Instant};
-use tokio::{task::JoinHandle, time::sleep};
-use tracing::info;
-
-use mysten_metrics::RegistryService;
 use sui_config::node::DBCheckpointConfig;
 use sui_config::{Config, SUI_CLIENT_CONFIG, SUI_NETWORK_CONFIG};
 use sui_config::{NodeConfig, PersistedConfig, SUI_KEYSTORE_FILENAME};
 use sui_json_rpc_types::{SuiTransactionBlockResponse, SuiTransactionBlockResponseOptions};
 use sui_keys::keystore::{AccountKeystore, FileBasedKeystore, Keystore};
-use sui_node::SuiNode;
 use sui_node::SuiNodeHandle;
 use sui_protocol_config::{ProtocolVersion, SupportedProtocolVersions};
 use sui_sdk::error::SuiRpcResult;
@@ -31,8 +25,9 @@ use sui_sdk::{SuiClient, SuiClientBuilder};
 use sui_swarm::memory::{Swarm, SwarmBuilder};
 use sui_swarm_config::genesis_config::{AccountConfig, GenesisConfig};
 use sui_swarm_config::network_config_builder::{
-    FullnodeConfigBuilder, ProtocolVersionsConfig, SupportedProtocolVersionsCallback,
+    ProtocolVersionsConfig, SupportedProtocolVersionsCallback,
 };
+use sui_swarm_config::node_config_builder::FullnodeConfigBuilder;
 use sui_types::base_types::{AuthorityName, ObjectID, SuiAddress};
 use sui_types::committee::EpochId;
 use sui_types::crypto::KeypairTraits;
@@ -42,11 +37,14 @@ use sui_types::sui_system_state::epoch_start_sui_system_state::EpochStartSystemS
 use sui_types::sui_system_state::SuiSystemState;
 use sui_types::sui_system_state::SuiSystemStateTrait;
 use sui_types::transaction::VerifiedTransaction;
+use tokio::time::{timeout, Instant};
+use tokio::{task::JoinHandle, time::sleep};
+use tracing::info;
 
 const NUM_VALIDAOTR: usize = 4;
 
 pub struct FullNodeHandle {
-    pub sui_node: Arc<SuiNode>,
+    pub sui_node: SuiNodeHandle,
     pub sui_client: SuiClient,
     pub rpc_client: HttpClient,
     pub rpc_url: String,
@@ -54,6 +52,30 @@ pub struct FullNodeHandle {
     pub ws_client: WsClient,
     pub ws_url: String,
 }
+impl FullNodeHandle {
+    pub async fn new(sui_node: SuiNodeHandle, json_rpc_address: SocketAddr) -> Self {
+        let rpc_url = format!("http://{}", json_rpc_address);
+        let rpc_client = HttpClientBuilder::default().build(&rpc_url).unwrap();
+
+        let ws_url = format!("ws://{}", json_rpc_address);
+        let ws_client = WsClientBuilder::default().build(&ws_url).await.unwrap();
+        let sui_client = SuiClientBuilder::default()
+            .ws_url(&ws_url)
+            .build(&rpc_url)
+            .await
+            .unwrap();
+
+        Self {
+            sui_node,
+            sui_client,
+            rpc_client,
+            rpc_url,
+            ws_client,
+            ws_url,
+        }
+    }
+}
+
 pub struct TestCluster {
     pub swarm: Swarm,
     pub accounts: Vec<SuiAddress>,
@@ -112,22 +134,29 @@ impl TestCluster {
     }
 
     pub fn fullnode_config_builder(&self) -> FullnodeConfigBuilder {
-        FullnodeConfigBuilder::new(self.swarm.config())
+        self.swarm.get_fullnode_config_builder()
     }
 
     /// Convenience method to start a new fullnode in the test cluster.
-    pub async fn start_fullnode(&self) -> Result<FullNodeHandle, anyhow::Error> {
-        let config = self.fullnode_config_builder().build().unwrap();
-        start_fullnode_from_config(config).await
+    pub async fn start_fullnode(&mut self) -> FullNodeHandle {
+        self.start_fullnode_from_config(
+            self.fullnode_config_builder()
+                .build(&mut OsRng, self.swarm.config()),
+        )
+        .await
+    }
+
+    pub async fn start_fullnode_from_config(&mut self, config: NodeConfig) -> FullNodeHandle {
+        let json_rpc_address = config.json_rpc_address;
+        let node = self.swarm.spawn_new_fullnode(config).await;
+        FullNodeHandle::new(node, json_rpc_address).await
     }
 
     pub fn all_node_handles(&self) -> impl Iterator<Item = SuiNodeHandle> {
         self.swarm
             .validator_node_handles()
             .into_iter()
-            .chain(std::iter::once(SuiNodeHandle::new(
-                self.fullnode_handle.sui_node.clone(),
-            )))
+            .chain(std::iter::once(self.fullnode_handle.sui_node.clone()))
     }
 
     pub fn get_validator_addresses(&self) -> Vec<AuthorityName> {
@@ -138,6 +167,20 @@ impl TestCluster {
         self.swarm.validator(name).unwrap().stop();
     }
 
+    pub async fn stop_all_validators(&self) {
+        info!("Stopping all validators in the cluster");
+        self.swarm.validators().for_each(|v| v.stop());
+        tokio::time::sleep(Duration::from_secs(3)).await;
+    }
+
+    pub async fn start_all_validator(&self) {
+        info!("Starting all validators in the cluster");
+        for v in self.swarm.validators() {
+            v.start().await.unwrap();
+        }
+        tokio::time::sleep(Duration::from_secs(3)).await;
+    }
+
     pub async fn start_validator(&self, name: AuthorityName) {
         let node = self.swarm.validator(name).unwrap();
         if node.is_running() {
@@ -161,10 +204,8 @@ impl TestCluster {
     pub async fn get_object_from_fullnode_store(&self, object_id: &ObjectID) -> Option<Object> {
         self.fullnode_handle
             .sui_node
-            .state()
-            .get_object(object_id)
+            .with_async(|node| async { node.state().get_object(object_id).await.unwrap() })
             .await
-            .unwrap()
     }
 
     /// To detect whether the network has reached such state, we use the fullnode as the
@@ -183,7 +224,10 @@ impl TestCluster {
         target_epoch: Option<EpochId>,
         timeout_dur: Duration,
     ) -> SuiSystemState {
-        let mut epoch_rx = self.fullnode_handle.sui_node.subscribe_to_epoch_change();
+        let mut epoch_rx = self
+            .fullnode_handle
+            .sui_node
+            .with(|node| node.subscribe_to_epoch_change());
         timeout(timeout_dur, async move {
             while let Ok(system_state) = epoch_rx.recv().await {
                 info!("received epoch {}", system_state.epoch());
@@ -213,8 +257,7 @@ impl TestCluster {
         let cur_committee = self
             .fullnode_handle
             .sui_node
-            .state()
-            .clone_committee_for_testing();
+            .with(|node| node.state().clone_committee_for_testing());
         let mut cur_stake = 0;
         for handle in self.swarm.validator_node_handles() {
             handle
@@ -405,7 +448,7 @@ impl TestClusterBuilder {
         }
     }
 
-    pub fn set_fullnode_rpc_port(mut self, rpc_port: u16) -> Self {
+    pub fn with_fullnode_rpc_port(mut self, rpc_port: u16) -> Self {
         self.fullnode_rpc_port = Some(rpc_port);
         self
     }
@@ -515,19 +558,10 @@ impl TestClusterBuilder {
         let mut wallet_conf: SuiClientConfig =
             PersistedConfig::read(&working_dir.join(SUI_CLIENT_CONFIG))?;
 
-        let fullnode_config = FullnodeConfigBuilder::new(swarm.config())
-            .with_supported_protocol_versions_config(
-                self.fullnode_supported_protocol_versions_config
-                    .clone()
-                    .unwrap_or_else(|| self.validator_supported_protocol_versions_config.clone()),
-            )
-            .with_db_checkpoint_config(self.db_checkpoint_config_fullnodes)
-            .set_event_store(self.enable_fullnode_events)
-            .set_rpc_port(self.fullnode_rpc_port)
-            .build()
-            .unwrap();
-
-        let fullnode_handle = start_fullnode_from_config(fullnode_config).await?;
+        let fullnode = swarm.fullnodes().next().unwrap();
+        let json_rpc_address = fullnode.config.json_rpc_address;
+        let fullnode_handle =
+            FullNodeHandle::new(fullnode.get_node_handle().unwrap(), json_rpc_address).await;
 
         wallet_conf.envs.push(SuiEnv {
             alias: "localnet".to_string(),
@@ -563,11 +597,21 @@ impl TestClusterBuilder {
             .with_db_checkpoint_config(self.db_checkpoint_config_validators.clone())
             .with_supported_protocol_versions_config(
                 self.validator_supported_protocol_versions_config.clone(),
-            );
+            )
+            .with_fullnode_count(1)
+            .with_fullnode_supported_protocol_versions_config(
+                self.fullnode_supported_protocol_versions_config
+                    .clone()
+                    .unwrap_or(self.validator_supported_protocol_versions_config.clone()),
+            )
+            .with_db_checkpoint_config(self.db_checkpoint_config_fullnodes.clone());
 
         if let Some(genesis_config) = self.genesis_config.take() {
             builder = builder.with_genesis_config(genesis_config);
         }
+        if let Some(fullnode_rpc_port) = self.fullnode_rpc_port {
+            builder = builder.with_fullnode_rpc_port(fullnode_rpc_port);
+        }
 
         let mut swarm = builder.build();
         swarm.launch().await?;
@@ -613,32 +657,6 @@ impl Default for TestClusterBuilder {
     }
 }
 
-pub async fn start_fullnode_from_config(
-    config: NodeConfig,
-) -> Result<FullNodeHandle, anyhow::Error> {
-    let registry_service = RegistryService::new(Registry::new());
-    let sui_node = SuiNode::start(&config, registry_service, None).await?;
-
-    let rpc_url = format!("http://{}", config.json_rpc_address);
-    let rpc_client = HttpClientBuilder::default().build(&rpc_url)?;
-
-    let ws_url = format!("ws://{}", config.json_rpc_address);
-    let ws_client = WsClientBuilder::default().build(&ws_url).await?;
-    let sui_client = SuiClientBuilder::default()
-        .ws_url(&ws_url)
-        .build(&rpc_url)
-        .await?;
-
-    Ok(FullNodeHandle {
-        sui_node,
-        sui_client,
-        rpc_client,
-        rpc_url,
-        ws_client,
-        ws_url,
-    })
-}
-
 // TODO: Merge the following functions with the ones inside TestCluster.
 pub async fn wait_for_node_transition_to_epoch(node: &SuiNodeHandle, expected_epoch: EpochId) {
     node.with_async(|node| async move {
diff --git a/narwhal/node/src/metrics.rs b/narwhal/node/src/metrics.rs
index 34c5ae775ac1c..bbc5f1dc9d56d 100644
--- a/narwhal/node/src/metrics.rs
+++ b/narwhal/node/src/metrics.rs
@@ -3,7 +3,7 @@
 use axum::{http::StatusCode, routing::get, Extension, Router};
 use config::{AuthorityIdentifier, WorkerId};
 use mysten_metrics::spawn_logged_monitored_task;
-use mysten_network::multiaddr::{to_socket_addr, Multiaddr};
+use mysten_network::multiaddr::Multiaddr;
 use prometheus::{Registry, TextEncoder};
 use std::collections::HashMap;
 use tokio::task::JoinHandle;
@@ -37,7 +37,9 @@ pub fn start_prometheus_server(addr: Multiaddr, registry: &Registry) -> JoinHand
         .route(METRICS_ROUTE, get(metrics))
         .layer(Extension(registry.clone()));
 
-    let socket_addr = to_socket_addr(&addr).expect("failed to convert Multiaddr to SocketAddr");
+    let socket_addr = addr
+        .to_socket_addr()
+        .expect("failed to convert Multiaddr to SocketAddr");
 
     spawn_logged_monitored_task!(
         async move {