diff --git a/.github/actions/dockerfiles/Dockerfile.debian-source b/.github/actions/dockerfiles/Dockerfile.debian-source index b8da585fe2..80c434e8d5 100644 --- a/.github/actions/dockerfiles/Dockerfile.debian-source +++ b/.github/actions/dockerfiles/Dockerfile.debian-source @@ -24,5 +24,5 @@ RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out FROM --platform=${TARGETPLATFORM} debian:bookworm -COPY --from=build /out/stacks-node /out/stacks-signer /bin/ +COPY --from=build /out/stacks-node /out/stacks-signer /out/stacks-inspect /bin/ CMD ["stacks-node", "mainnet"] diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 23eed46f1e..04e74f94e8 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -99,6 +99,8 @@ jobs: - tests::signer::v0::forked_tenure_okay - tests::signer::v0::forked_tenure_invalid - tests::signer::v0::empty_sortition + - tests::signer::v0::empty_sortition_before_approval + - tests::signer::v0::empty_sortition_before_proposal - tests::signer::v0::bitcoind_forking_test - tests::signer::v0::multiple_miners - tests::signer::v0::mock_sign_epoch_25 @@ -112,6 +114,7 @@ jobs: - tests::signer::v0::locally_accepted_blocks_overriden_by_global_rejection - tests::signer::v0::locally_rejected_blocks_overriden_by_global_acceptance - tests::signer::v0::reorg_locally_accepted_blocks_across_tenures_succeeds + - tests::signer::v0::reorg_locally_accepted_blocks_across_tenures_fails - tests::signer::v0::miner_recovers_when_broadcast_block_delay_across_tenures_occurs - tests::signer::v0::multiple_miners_with_nakamoto_blocks - tests::signer::v0::partial_tenure_fork @@ -120,6 +123,10 @@ jobs: - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::signer::v0::continue_after_tenure_extend - tests::signer::v0::multiple_miners_with_custom_chain_id + - tests::signer::v0::block_commit_delay + - tests::signer::v0::continue_after_fast_block_no_sortition + - tests::signer::v0::block_validation_response_timeout + - tests::signer::v0::tenure_extend_after_bad_commit - tests::nakamoto_integrations::burn_ops_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state @@ -133,6 +140,10 @@ jobs: - tests::nakamoto_integrations::utxo_check_on_startup_panic - tests::nakamoto_integrations::utxo_check_on_startup_recover - tests::nakamoto_integrations::v3_signer_api_endpoint + - tests::nakamoto_integrations::test_shadow_recovery + - tests::nakamoto_integrations::signer_chainstate + - tests::nakamoto_integrations::clarity_cost_spend_down + - tests::nakamoto_integrations::v3_blockbyheight_api_endpoint # TODO: enable these once v1 signer is supported by a new nakamoto epoch # - tests::signer::v1::dkg # - tests::signer::v1::sign_request_rejected diff --git a/.github/workflows/p2p-tests.yml b/.github/workflows/p2p-tests.yml index a8346e2948..1c33eca0fb 100644 --- a/.github/workflows/p2p-tests.yml +++ b/.github/workflows/p2p-tests.yml @@ -42,6 +42,11 @@ jobs: - net::tests::convergence::test_walk_star_15_pingback - net::tests::convergence::test_walk_star_15_org_biased - net::tests::convergence::test_walk_inbound_line_15 + - net::api::tests::postblock_proposal::test_try_make_response + - net::server::tests::test_http_10_threads_getinfo + - net::server::tests::test_http_10_threads_getblock + - net::server::tests::test_http_too_many_clients + - net::server::tests::test_http_slow_client steps: ## Setup test 
environment - name: Setup Test Environment diff --git a/CHANGELOG.md b/CHANGELOG.md index fe5e200d17..046ca667a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,48 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +### Added + +### Changed + + +## [3.0.0.0.3] + +### Added + +### Changed +- Add index for StacksBlockId to nakamoto block headers table (improves node performance) +- Remove the panic for reporting DB deadlocks (just error and continue waiting) +- Add index to `metadata_table` in Clarity DB on `blockhash` +- Add `block_commit_delay_ms` to the config file to control how long to wait after seeing a new burn block before submitting a block commit; this gives the first Nakamoto block of the new tenure time to be mined, letting this miner avoid the need to RBF the block commit. +- Add `tenure_cost_limit_per_block_percentage` to the miner config file to control the percentage of the remaining tenure cost limit to consume per Nakamoto block. +- Add `/v3/blocks/height/:block_height` RPC endpoint +- If the winning miner of a sortition is committed to the wrong parent tenure, the previous miner can immediately tenure extend and continue mining since the winning miner would never be able to propose a valid block. (#5361) + +## [3.0.0.0.2] + +### Added + +### Changed +- Fixes a few bugs in the relayer and networking stack - detects and deprioritizes unhealthy replicas - fixes an issue in the p2p stack which was preventing it from caching the reward set. + +## [3.0.0.0.1] + +### Changed +- Add index for StacksBlockId to nakamoto block headers table (improves node performance) +- Remove the panic for reporting DB deadlocks (just error and continue waiting) +- Various test fixes for CI (5353, 5368, 5372, 5371, 5380, 5378, 5387, 5396, 5390, 5394) +- Various log fixes: - don't say proceeding to mine blocks if not a miner - misc.
warns downgraded to debugs - 5391: Update default block proposal timeout to 10 minutes - 5406: After block rejection, miner pauses - Docs fixes - Fix signer docs link - Specify burn block in clarity docs ## [3.0.0.0.0] ### Added diff --git a/Cargo.lock b/Cargo.lock index 227cd9d768..8a3769b6a8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3328,6 +3328,7 @@ dependencies = [ "stackslib", "stx-genesis", "tempfile", + "thiserror", "tikv-jemallocator", "tiny_http", "tokio", @@ -3592,18 +3593,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 10dc427e2e..c00c223c47 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" rusqlite = { version = "0.31.0", features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] } +thiserror = { version = "1.0.65" } # Use a bit more than default optimization for # dev builds to speed up test execution diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 0751822ed0..b3ee746fcf 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -896,6 +896,7 @@ impl LimitedCostTracker { Self::Free => ExecutionCost::max_value(), } } + pub fn get_memory(&self) -> u64 { match self { Self::Limited(TrackerData { memory, .. }) => *memory, @@ -1170,6 +1171,7 @@ pub trait CostOverflowingMath<T> { fn cost_overflow_mul(self, other: T) -> Result<T>; fn cost_overflow_add(self, other: T) -> Result<T>; fn cost_overflow_sub(self, other: T) -> Result<T>; + fn cost_overflow_div(self, other: T) -> Result<T>; } impl CostOverflowingMath<u64> for u64 { @@ -1185,6 +1187,10 @@ self.checked_sub(other) .ok_or_else(|| CostErrors::CostOverflow) } + fn cost_overflow_div(self, other: u64) -> Result<u64> { + self.checked_div(other) .ok_or_else(|| CostErrors::CostOverflow) + } } impl ExecutionCost { @@ -1293,6 +1299,15 @@ Ok(()) } + pub fn divide(&mut self, divisor: u64) -> Result<()> { + self.runtime = self.runtime.cost_overflow_div(divisor)?; + self.read_count = self.read_count.cost_overflow_div(divisor)?; + self.read_length = self.read_length.cost_overflow_div(divisor)?; + self.write_length = self.write_length.cost_overflow_div(divisor)?; + self.write_count = self.write_count.cost_overflow_div(divisor)?; + Ok(()) + }
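The new `divide` helper is what lets a miner split a budget per `tenure_cost_limit_per_block_percentage`: every dimension goes through `checked_div`, so a zero divisor surfaces as `CostErrors::CostOverflow` instead of panicking or wrapping. A quick sketch of the call pattern; the budget numbers below are illustrative only:

```rust
use clarity::vm::costs::ExecutionCost;

fn main() {
    // Illustrative remaining tenure budget, split evenly across four blocks.
    let mut remaining = ExecutionCost {
        write_length: 15_000_000,
        write_count: 15_000,
        read_length: 100_000_000,
        read_count: 15_000,
        runtime: 5_000_000_000,
    };
    remaining.divide(4).expect("divisor is non-zero");
    assert_eq!(remaining.write_count, 3_750);
    assert_eq!(remaining.runtime, 1_250_000_000);
}
```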
/// Returns whether or not this cost exceeds any dimension of the /// other cost. pub fn exceeds(&self, other: &ExecutionCost) -> bool { diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index 7d2af59eb5..0e0f0e3f6e 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -248,6 +248,12 @@ impl SqliteConnection { ) .map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?; + conn.execute( + "CREATE INDEX IF NOT EXISTS md_blockhashes ON metadata_table(blockhash)", + NO_PARAMS, + ) + .map_err(|x| InterpreterError::SqliteError(IncomparableError { err: x }))?; + Self::check_schema(conn)?; Ok(()) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index d718ff5366..9075c55e71 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -1775,17 +1775,17 @@ this value is less than or equal to the value for `miner-spend-total` at the sam const GET_BURN_BLOCK_INFO_API: SpecialAPI = SpecialAPI { input_type: "BurnBlockInfoPropertyName, uint", output_type: "(optional buff) | (optional (tuple (addrs (list 2 (tuple (hashbytes (buff 32)) (version (buff 1))))) (payout uint)))", - snippet: "get-burn-block-info? ${1:prop} ${2:block-height}", - signature: "(get-burn-block-info? prop-name block-height)", + snippet: "get-burn-block-info? ${1:prop} ${2:burn-block-height}", + signature: "(get-burn-block-info? prop-name burn-block-height)", description: "The `get-burn-block-info?` function fetches data for a block of the given *burnchain* block height. The -value and type returned are determined by the specified `BlockInfoPropertyName`. Valid values for `block-height` only +value and type returned are determined by the specified `BlockInfoPropertyName`. Valid values for `burn-block-height` only include heights between the burnchain height at the time the Stacks chain was launched, and the last-processed burnchain -block. If the `block-height` argument falls outside of this range, then `none` shall be returned. +block. If the `burn-block-height` argument falls outside of this range, then `none` shall be returned. The following `BlockInfoPropertyName` values are defined: * The `header-hash` property returns a 32-byte buffer representing the header hash of the burnchain block at -burnchain height `block-height`. +burnchain height `burn-block-height`. * The `pox-addrs` property returns a tuple with two items: a list of up to two PoX addresses that received a PoX payout at that block height, and the amount of burnchain tokens paid to each address (note that per the blockchain consensus rules, each PoX payout will be the same for each address in the block-commit transaction). @@ -1811,11 +1811,11 @@ The `addrs` list contains the same PoX address values passed into the PoX smart const GET_STACKS_BLOCK_INFO_API: SpecialAPI = SpecialAPI { input_type: "StacksBlockInfoPropertyName, uint", - snippet: "get-stacks-block-info? ${1:prop} ${2:block-height}", + snippet: "get-stacks-block-info? ${1:prop} ${2:stacks-block-height}", output_type: "(optional buff) | (optional uint)", - signature: "(get-stacks-block-info? prop-name block-height)", + signature: "(get-stacks-block-info? prop-name stacks-block-height)", description: "The `get-stacks-block-info?` function fetches data for a block of the given *Stacks* block height. The -value and type returned are determined by the specified `StacksBlockInfoPropertyName`. If the provided `block-height` does +value and type returned are determined by the specified `StacksBlockInfoPropertyName`.
If the provided `stacks-block-height` does not correspond to an existing block prior to the current block, the function returns `none`. The currently available property names are as follows: @@ -1840,11 +1840,11 @@ the mining of this block started, but is not guaranteed to be accurate. This tim const GET_TENURE_INFO_API: SpecialAPI = SpecialAPI { input_type: "TenureInfoPropertyName, uint", - snippet: "get-tenure-info? ${1:prop} ${2:block-height}", + snippet: "get-tenure-info? ${1:prop} ${2:stacks-block-height}", output_type: "(optional buff) | (optional uint)", - signature: "(get-tenure-info? prop-name block-height)", + signature: "(get-tenure-info? prop-name stacks-block-height)", description: "The `get-tenure-info?` function fetches data for the tenure at the given block height. The -value and type returned are determined by the specified `TenureInfoPropertyName`. If the provided `block-height` does +value and type returned are determined by the specified `TenureInfoPropertyName`. If the provided `stacks-block-height` does not correspond to an existing block prior to the current block, the function returns `none`. The currently available property names are as follows: diff --git a/docs/mining.md b/docs/mining.md index 34a299cd1c..10f49c5620 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -19,14 +19,26 @@ nakamoto_attempt_time_ms = 20000 [burnchain] # Maximum amount (in sats) of "burn commitment" to broadcast for the next block's leader election burn_fee_cap = 20000 -# Amount (in sats) per byte - Used to calculate the transaction fees -satoshis_per_byte = 25 -# Amount of sats to add when RBF'ing bitcoin tx (default: 5) +# Amount in sats per byte used to calculate the Bitcoin transaction fee (default: 50) +satoshis_per_byte = 50 +# Amount of sats per byte to add when RBF'ing a Bitcoin tx (default: 5) rbf_fee_increment = 5 -# Maximum percentage to RBF bitcoin tx (default: 150% of satsv/B) +# Maximum percentage of satoshis_per_byte to allow in RBF fee (default: 150) max_rbf = 150 ``` +NOTE: Ensuring that your miner can successfully use RBF (Replace-by-Fee) is +critical for reliable block production. If a miner fails to replace an outdated +block commit with a higher-fee transaction, it risks committing to an incorrect +tenure. This would prevent the miner from producing valid blocks during its +tenure, as it would be building on an invalid chain tip, causing the signers to +reject its blocks. + +To avoid this, configure `satoshis_per_byte`, `rbf_fee_increment`, and `max_rbf` to +allow for at least three fee increments within the `max_rbf` limit. This helps +ensure that your miner can adjust its fees sufficiently to stay on the canonical +chain. + You can verify that your node is operating as a miner by checking its log output to verify that it was able to find its Bitcoin UTXOs: diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md index 9f0e09fd20..94a5479613 100644 --- a/docs/rpc-endpoints.md +++ b/docs/rpc-endpoints.md @@ -503,6 +503,17 @@ data. This will return 404 if the block does not exist. +### GET /v3/blocks/height/[Block Height] + +Fetch a Nakamoto block given its block height. This returns the raw block +data. + +This will return 404 if the block does not exist. + +This endpoint also accepts a querystring parameter `?tip=` which, when supplied, +will return the block relative to the specified tip, allowing sibling blocks +(same height, different tip) to be queried as well.
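As a sketch, a client might fetch from the new endpoint like this; the node address (the default RPC bind `127.0.0.1:20443`) and the `reqwest` crate with its `blocking` feature are assumptions for illustration, not part of this change:

```rust
use std::error::Error;

/// Fetch the raw SIP-003-encoded block at `height`, optionally relative to a tip.
fn get_block_by_height(height: u64, tip: Option<&str>) -> Result<Vec<u8>, Box<dyn Error>> {
    let mut url = format!("http://127.0.0.1:20443/v3/blocks/height/{height}");
    if let Some(tip) = tip {
        // Pin the query to a specific tip to reach sibling blocks.
        url.push_str(&format!("?tip={tip}"));
    }
    let resp = reqwest::blocking::get(&url)?;
    if resp.status() == 404 {
        return Err("block not found".into());
    }
    Ok(resp.bytes()?.to_vec())
}
```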
+ ### GET /v3/tenures/[Block ID] Fetch a Nakamoto block and all of its ancestors in the same tenure, given its diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index c4dd06721c..db36da8bac 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -627,6 +627,40 @@ paths: content: application/text-plain: {} + /v3/blocks/height/{block_height}: + get: + summary: Fetch a Nakamoto block by its height and optional tip + tags: + - Blocks + operationId: get_block_v3_by_height + description: Fetch a Nakamoto block by its height and optional tip. + parameters: + - name: block_height + in: path + description: The block's height + required: true + schema: + type: integer + - name: tip + in: query + schema: + type: string + description: The Stacks chain tip to query from. If tip == latest or empty, the query will be run + from the latest known tip. + responses: + "200": + description: The raw SIP-003-encoded block will be returned. + content: + application/octet-stream: + schema: + type: string + format: binary + "404": + description: The block could not be found + content: + application/text-plain: {} + /v3/tenures/info: get: summary: Fetch metadata about the ongoing Nakamoto tenure diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index 63241d3256..7c472365a1 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -30,7 +30,7 @@ slog-term = "2.6.0" slog-json = { version = "2.3.0", optional = true } stacks-common = { path = "../stacks-common" } stackslib = { path = "../stackslib"} -thiserror = "1.0" +thiserror = { workspace = true } tiny_http = "0.12" [dev-dependencies] diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 23f2b006db..8089c6c0a1 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -1,5 +1,6 @@ use std::cmp::Ordering; use std::fmt; +use std::ops::{Deref, DerefMut, Index, IndexMut}; #[cfg(feature = "canonical")] pub mod sqlite; @@ -169,6 +170,21 @@ impl StacksEpochId { } } + /// Whether or not this epoch supports shadow blocks + pub fn supports_shadow_blocks(&self) -> bool { + match self { + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => false, + StacksEpochId::Epoch30 => true, + } + } + /// Does this epoch support unlocking PoX contributors that miss a slot? /// /// Epoch 2.0 - 2.05 didn't support this feature, but they weren't epoch-guarded on it. Instead, @@ -460,3 +476,83 @@ impl Ord for StacksEpoch { self.epoch_id.cmp(&other.epoch_id) } } + +/// A wrapper for holding a list of Epochs, indexable by StacksEpochId +#[derive(Clone, Debug, Default, Deserialize, PartialEq, Eq)] +pub struct EpochList<L: Clone>(Vec<StacksEpoch<L>>); + +impl<L: Clone> EpochList<L> { + pub fn new(epochs: &[StacksEpoch<L>]) -> EpochList<L> { + EpochList(epochs.to_vec()) + } + + pub fn get(&self, index: StacksEpochId) -> Option<&StacksEpoch<L>> { + self.0.get(StacksEpoch::find_epoch_by_id(&self.0, index)?) + } + + pub fn get_mut(&mut self, index: StacksEpochId) -> Option<&mut StacksEpoch<L>> { + let index = StacksEpoch::find_epoch_by_id(&self.0, index)?; + self.0.get_mut(index) + } + + /// Truncates the list after the given epoch id + pub fn truncate_after(&mut self, epoch_id: StacksEpochId) { + if let Some(index) = StacksEpoch::find_epoch_by_id(&self.0, epoch_id) { + self.0.truncate(index + 1); + } + } + + /// Determine which epoch, if any, a given burnchain height falls into.
+ pub fn epoch_id_at_height(&self, height: u64) -> Option<StacksEpochId> { + StacksEpoch::find_epoch(self, height).map(|idx| self.0[idx].epoch_id) + } + + /// Determine which epoch, if any, a given burnchain height falls into. + pub fn epoch_at_height(&self, height: u64) -> Option<StacksEpoch<L>> { + StacksEpoch::find_epoch(self, height).map(|idx| self.0[idx].clone()) + } + + /// Pushes a new `StacksEpoch` to the end of the list + pub fn push(&mut self, epoch: StacksEpoch<L>) { + if let Some(last) = self.0.last() { + assert!( + epoch.start_height == last.end_height && epoch.epoch_id > last.epoch_id, + "Epochs must be pushed in order" + ); + } + self.0.push(epoch); + } + + pub fn to_vec(&self) -> Vec<StacksEpoch<L>> { + self.0.clone() + } +} + +impl<L: Clone> Index<StacksEpochId> for EpochList<L> { + type Output = StacksEpoch<L>; + fn index(&self, index: StacksEpochId) -> &StacksEpoch<L> { + self.get(index) + .expect("Invalid StacksEpochId: could not find corresponding epoch") + } +} + +impl<L: Clone> IndexMut<StacksEpochId> for EpochList<L> { + fn index_mut(&mut self, index: StacksEpochId) -> &mut StacksEpoch<L> { + self.get_mut(index) + .expect("Invalid StacksEpochId: could not find corresponding epoch") + } +} + +impl<L: Clone> Deref for EpochList<L> { + type Target = [StacksEpoch<L>]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<L: Clone> DerefMut for EpochList<L> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +}
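A usage sketch of the new wrapper, indexing by `StacksEpochId` instead of by position. The two epochs, the `()` stand-in for the block limit, the `network_epoch` values, and the import path are all hypothetical, for illustration only:

```rust
use stacks_common::types::{EpochList, StacksEpoch, StacksEpochId};

fn main() {
    let mut epochs = EpochList::new(&[
        StacksEpoch { epoch_id: StacksEpochId::Epoch25, start_height: 0, end_height: 100, block_limit: (), network_epoch: 0 },
        StacksEpoch { epoch_id: StacksEpochId::Epoch30, start_height: 100, end_height: u64::MAX, block_limit: (), network_epoch: 1 },
    ]);
    // Index and mutate by epoch id rather than by Vec position.
    assert_eq!(epochs[StacksEpochId::Epoch30].start_height, 100);
    epochs[StacksEpochId::Epoch30].end_height = u64::MAX;
    // Height lookups, and slice methods inherited through Deref, still work.
    assert_eq!(epochs.epoch_id_at_height(50), Some(StacksEpochId::Epoch25));
    assert_eq!(epochs.len(), 2);
}
```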
diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs index 89fe4677c7..53564af597 100644 --- a/stacks-common/src/util/db.rs +++ b/stacks-common/src/util/db.rs @@ -51,26 +51,25 @@ pub fn update_lock_table(conn: &Connection) { /// Called by `rusqlite` if we are waiting too long on a database lock /// If called too many times, will assume a deadlock and panic pub fn tx_busy_handler(run_count: i32) -> bool { - const TIMEOUT: Duration = Duration::from_secs(300); const AVG_SLEEP_TIME_MS: u64 = 100; + // Every ~5min, report an error with a backtrace + // 5min * 60s/min * 1_000ms/s / 100ms + const ERROR_COUNT: u32 = 3_000; + // First, check if this is taking unreasonably long. If so, it's probably a deadlock let run_count = run_count.unsigned_abs(); - let approx_time_elapsed = - Duration::from_millis(AVG_SLEEP_TIME_MS.saturating_mul(u64::from(run_count))); - if approx_time_elapsed > TIMEOUT { - error!("Deadlock detected. Waited {} seconds (estimated) for database lock. Giving up", approx_time_elapsed.as_secs(); + if run_count > 0 && run_count % ERROR_COUNT == 0 { + error!("Deadlock detected. Waited 5 minutes (estimated) for database lock."; "run_count" => run_count, "backtrace" => ?Backtrace::capture() ); for (k, v) in LOCK_TABLE.lock().unwrap().iter() { error!("Database '{k}' last locked by {v}"); } - panic!("Deadlock in thread {:?}", thread::current().name()); } let mut sleep_time_ms = 2u64.saturating_pow(run_count); - sleep_time_ms = sleep_time_ms.saturating_add(thread_rng().gen_range(0..sleep_time_ms)); if sleep_time_ms > AVG_SLEEP_TIME_MS {
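The `ERROR_COUNT` arithmetic in the hunk above is easy to sanity-check: at the assumed ~100 ms average sleep, 3,000 retries is about five minutes between deadlock reports.

```rust
const AVG_SLEEP_TIME_MS: u64 = 100;
// 5 min * 60 s/min * 1_000 ms/s, divided by 100 ms per retry = 3_000 retries.
const ERROR_COUNT: u32 = (5 * 60 * 1_000 / AVG_SLEEP_TIME_MS) as u32;

fn main() {
    assert_eq!(ERROR_COUNT, 3_000);
}
```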
diff --git a/stacks-common/src/util/log.rs b/stacks-common/src/util/log.rs index 9d52f0dbbf..534f3f9969 100644 --- a/stacks-common/src/util/log.rs +++ b/stacks-common/src/util/log.rs @@ -215,14 +215,13 @@ fn make_json_logger() -> Logger { panic!("Tried to construct JSON logger, but stacks-blockchain built without slog_json feature enabled.") } -#[cfg(not(any(test, feature = "testing")))] fn make_logger() -> Logger { if env::var("STACKS_LOG_JSON") == Ok("1".into()) { make_json_logger() } else { let debug = env::var("STACKS_LOG_DEBUG") == Ok("1".into()); let pretty_print = env::var("STACKS_LOG_PP") == Ok("1".into()); - let decorator = slog_term::PlainSyncDecorator::new(std::io::stderr()); + let decorator = get_decorator(); let atty = isatty(Stream::Stderr); let drain = TermFormat::new(decorator, pretty_print, debug, atty); let logger = Logger::root(drain.ignore_res(), o!()); @@ -231,17 +230,13 @@ } #[cfg(any(test, feature = "testing"))] -fn make_logger() -> Logger { - if env::var("STACKS_LOG_JSON") == Ok("1".into()) { - make_json_logger() - } else { - let debug = env::var("STACKS_LOG_DEBUG") == Ok("1".into()); - let plain = slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter); - let isatty = isatty(Stream::Stdout); - let drain = TermFormat::new(plain, false, debug, isatty); - let logger = Logger::root(drain.ignore_res(), o!()); - logger - } +fn get_decorator() -> slog_term::PlainSyncDecorator<slog_term::TestStdoutWriter> { + slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter) +} + +#[cfg(not(any(test, feature = "testing")))] +fn get_decorator() -> slog_term::PlainSyncDecorator<std::io::Stderr> { + slog_term::PlainSyncDecorator::new(std::io::stderr()) } fn inner_get_loglevel() -> slog::Level { diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 489fd39cf7..46e25b285f 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,7 +11,38 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed -## [3.0.0.0.0] +## [3.0.0.0.3.0] + +### Added + +### Changed + +- Allow a miner to extend their tenure immediately if the winner of the next tenure has committed to the wrong parent tenure (#5361) + +## [3.0.0.0.2.0] + +### Added +- Adds `tenure_last_block_proposal_timeout_secs` option to account for delayed global block acceptance. Defaults to 30s. + +### Changed + +## [3.0.0.0.1.0] + +### Added + +### Changed + +- Change block rejection message to generic block response + +## [3.0.0.0.0.1] + +### Added + +### Changed +- Update block proposal timeout default to 10 minutes (#5391) +- Updated documentation link in output (#5363) + +## [3.0.0.0.0.0] ### Added diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index da94cc10de..139c34fba8 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -38,7 +38,7 @@ slog-json = { version = "2.3.0", optional = true } slog-term = "2.6.0" stacks-common = { path = "../stacks-common" } stackslib = { path = "../stackslib" } -thiserror = "1.0" +thiserror = { workspace = true } tiny_http = { version = "0.12", optional = true } toml = "0.5.6" tracing = "0.1.37" diff --git a/stacks-signer/src/chainstate.rs b/stacks-signer/src/chainstate.rs index 44ae11b252..9fb5c496c8 100644 --- a/stacks-signer/src/chainstate.rs +++ b/stacks-signer/src/chainstate.rs @@ -19,15 +19,15 @@ use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::TenureChangePayload; use blockstack_lib::net::api::getsortition::SortitionInfo; use blockstack_lib::util_lib::db::Error as DBError; -use clarity::types::chainstate::BurnchainHeaderHash; use slog::{slog_info, slog_warn}; -use stacks_common::types::chainstate::{ConsensusHash, StacksPublicKey}; +use stacks_common::types::chainstate::{BurnchainHeaderHash, ConsensusHash, StacksPublicKey}; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Hash160; use stacks_common::{info, warn}; use crate::client::{ClientError, CurrentAndLastSortition, StacksClient}; use crate::config::SignerConfig; -use crate::signerdb::{BlockState, SignerDb}; +use crate::signerdb::{BlockInfo, BlockState, SignerDb}; #[derive(thiserror::Error, Debug)] /// Error type for the signer chainstate module @@ -119,6 +119,9 @@ pub struct ProposalEvalConfig { pub first_proposal_burn_block_timing: Duration, /// Time between processing a sortition and proposing a block before the block is considered invalid pub block_proposal_timeout: Duration, + /// Time to wait for the last block of a tenure to be globally accepted or rejected before considering + /// a new miner's block at the same height as valid. + pub tenure_last_block_proposal_timeout: Duration, } impl From<&SignerConfig> for ProposalEvalConfig { @@ -126,6 +129,7 @@ Self { first_proposal_burn_block_timing: value.first_proposal_burn_block_timing, block_proposal_timeout: value.block_proposal_timeout, + tenure_last_block_proposal_timeout: value.tenure_last_block_proposal_timeout, } } } @@ -199,7 +203,40 @@ "current_sortition_consensus_hash" => ?self.cur_sortition.consensus_hash, ); self.cur_sortition.miner_status = SortitionMinerStatus::InvalidatedBeforeFirstBlock; + } else if let Some(tip) = signer_db.get_canonical_tip()? { + // Check if the current sortition is aligned with the expected tenure: + // - If the tip is in the current tenure, we are in the process of mining this tenure. + // - If the tip is not in the current tenure, then we're starting a new tenure, + // and the current sortition's parent tenure must match the tenure of the tip. + // - If the tip is not building off of the current sortition's parent tenure, then + // check to see if the tip's parent is within the first proposal burn block timeout, + // which allows for forks when a burn block arrives quickly.
+ // - Else the miner of the current sortition has committed to an incorrect parent tenure. + let consensus_hash_match = + self.cur_sortition.consensus_hash == tip.block.header.consensus_hash; + let parent_tenure_id_match = + self.cur_sortition.parent_tenure_id == tip.block.header.consensus_hash; + if !consensus_hash_match && !parent_tenure_id_match { + // More expensive check, so do it only if we need to. + let is_valid_parent_tenure = Self::check_parent_tenure_choice( + &self.cur_sortition, + block, + signer_db, + client, + &self.config.first_proposal_burn_block_timing, + )?; + if !is_valid_parent_tenure { + warn!( + "Current sortition does not build off of canonical tip tenure, marking as invalid"; + "current_sortition_parent" => ?self.cur_sortition.parent_tenure_id, + "tip_consensus_hash" => ?tip.block.header.consensus_hash, + ); + self.cur_sortition.miner_status = + SortitionMinerStatus::InvalidatedBeforeFirstBlock; + } + } } + if let Some(last_sortition) = self.last_sortition.as_mut() { if last_sortition.is_timed_out(self.config.block_proposal_timeout, signer_db)? { info!( @@ -300,6 +337,7 @@ impl SortitionsView { "Miner block proposal is from last sortition winner, when the new sortition winner is still valid. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), + "current_sortition_miner_status" => ?self.cur_sortition.miner_status, ); return Ok(false); } @@ -435,6 +473,8 @@ impl SortitionsView { "violating_tenure_proposed_time" => local_block_info.proposed_time, "new_tenure_received_time" => sortition_state_received_time, "new_tenure_burn_timestamp" => sortition_state.burn_header_timestamp, + "first_proposal_burn_block_timing_secs" => first_proposal_burn_block_timing.as_secs(), + "proposal_to_sortition" => proposal_to_sortition, ); continue; } @@ -460,7 +500,36 @@ impl SortitionsView { Ok(true) } - /// Check if the tenure change block confirms the expected parent block (i.e., the last globally accepted block in the parent tenure) + /// Get the last block from the given tenure + /// Returns the last locally accepted block if it is not timed out, otherwise it will return the last globally accepted block. 
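The "not timed out" test described here is a single comparison of epoch-second timestamps. As a standalone sketch, with plain integers in place of the SignerDb types and a hypothetical name:

```rust
/// A locally accepted block only counts as the tenure tip while its own
/// signing time (`signed_self_secs`) is within the timeout window.
fn locally_accepted_is_fresh(signed_self_secs: u64, timeout_secs: u64, now_secs: u64) -> bool {
    signed_self_secs + timeout_secs > now_secs
}
```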
+ fn get_tenure_last_block_info( + consensus_hash: &ConsensusHash, + signer_db: &SignerDb, + tenure_last_block_proposal_timeout: Duration, + ) -> Result<Option<BlockInfo>, ClientError> { + // Get the last known block in the previous tenure + let last_locally_accepted_block = signer_db + .get_last_accepted_block(consensus_hash) + .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; + + if let Some(local_info) = last_locally_accepted_block { + if let Some(signed_over_time) = local_info.signed_self { + if signed_over_time + tenure_last_block_proposal_timeout.as_secs() + > get_epoch_time_secs() + { + // The last locally accepted block is not timed out, return it + return Ok(Some(local_info)); + } + } + } + // The last locally accepted block is timed out, get the last globally accepted block + signer_db + .get_last_globally_accepted_block(consensus_hash) + .map_err(|e| ClientError::InvalidResponse(e.to_string())) + } + + /// Check if the tenure change block confirms the expected parent block + /// (i.e., the last locally accepted block in the parent tenure, or if that block is timed out, the last globally accepted block in the parent tenure) /// It checks the local DB first, and if the block is not present in the local DB, it asks the /// Stacks node for the highest processed block header in the given tenure (and then caches it /// in the DB). @@ -473,24 +542,27 @@ reward_cycle: u64, signer_db: &mut SignerDb, client: &StacksClient, + tenure_last_block_proposal_timeout: Duration, ) -> Result<bool, ClientError> { - // If the tenure change block confirms the expected parent block, it should confirm at least one more block than the last globally accepted block in the parent tenure. - let last_globally_accepted_block = signer_db - .get_last_globally_accepted_block(&tenure_change.prev_tenure_consensus_hash) - .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; + // If the tenure change block confirms the expected parent block, it should confirm at least one more block than the last accepted block in the parent tenure. + let last_block_info = Self::get_tenure_last_block_info( + &tenure_change.prev_tenure_consensus_hash, + signer_db, + tenure_last_block_proposal_timeout, + )?; - if let Some(global_info) = last_globally_accepted_block { + if let Some(info) = last_block_info { // N.B. this block might not be the last globally accepted block across the network; // it's just the highest one in this tenure that we know about. If this given block is // no higher than it, then it's definitely no higher than the last globally accepted // block across the network, so we can do an early rejection here.
- if block.header.chain_length <= global_info.block.header.chain_length { + if block.header.chain_length <= info.block.header.chain_length { warn!( "Miner's block proposal does not confirm as many blocks as we expect"; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), "proposed_chain_length" => block.header.chain_length, - "expected_at_least" => global_info.block.header.chain_length + 1, + "expected_at_least" => info.block.header.chain_length + 1, ); return Ok(false); } @@ -558,6 +630,7 @@ impl SortitionsView { reward_cycle, signer_db, client, + self.config.tenure_last_block_proposal_timeout, )?; if !confirms_expected_parent { return Ok(false); @@ -573,15 +646,15 @@ impl SortitionsView { if !is_valid_parent_tenure { return Ok(false); } - let last_in_tenure = signer_db + let last_in_current_tenure = signer_db .get_last_globally_accepted_block(&block.header.consensus_hash) .map_err(|e| ClientError::InvalidResponse(e.to_string()))?; - if let Some(last_in_tenure) = last_in_tenure { + if let Some(last_in_current_tenure) = last_in_current_tenure { warn!( "Miner block proposal contains a tenure change, but we've already signed a block in this tenure. Considering proposal invalid."; "proposed_block_consensus_hash" => %block.header.consensus_hash, "proposed_block_signer_sighash" => %block.header.signer_signature_hash(), - "last_in_tenure_signer_sighash" => %last_in_tenure.block.header.signer_signature_hash(), + "last_in_tenure_signer_sighash" => %last_in_current_tenure.block.header.signer_signature_hash(), ); return Ok(false); } diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 9885182d98..37706368dc 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -411,6 +411,8 @@ pub(crate) mod tests { db_path: config.db_path.clone(), first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, block_proposal_timeout: config.block_proposal_timeout, + tenure_last_block_proposal_timeout: config.tenure_last_block_proposal_timeout, + block_proposal_validation_timeout: config.block_proposal_validation_timeout, } } diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index c0514274e1..57c90ab0eb 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -34,8 +34,10 @@ use stacks_common::util::hash::Hash160; use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; -const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 45_000; +const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 600_000; +const BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS: u64 = 120_000; const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; +const DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS: u64 = 30; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided configuration @@ -128,6 +130,11 @@ pub struct SignerConfig { pub first_proposal_burn_block_timing: Duration, /// How much time to wait for a miner to propose a block following a sortition pub block_proposal_timeout: Duration, + /// Time to wait for the last block of a tenure to be globally accepted or rejected + /// before considering a new miner's block at the same height as potentially valid. 
+ pub tenure_last_block_proposal_timeout: Duration, + /// How much time to wait for a block proposal validation response before marking the block invalid + pub block_proposal_validation_timeout: Duration, } /// The parsed configuration for the signer @@ -158,6 +165,12 @@ pub struct GlobalConfig { pub block_proposal_timeout: Duration, /// An optional custom Chain ID pub chain_id: Option<u32>, + /// Time to wait for the last block of a tenure to be globally accepted or rejected + /// before considering a new miner's block at the same height as potentially valid. + pub tenure_last_block_proposal_timeout: Duration, + /// How long to wait for a block proposal validation response from the node + /// before marking that block as invalid and rejecting it + pub block_proposal_validation_timeout: Duration, } /// Internal struct for loading up the config file @@ -180,13 +193,19 @@ struct RawConfigFile { pub db_path: String, /// Metrics endpoint pub metrics_endpoint: Option<String>, - /// How much time must pass between the first block proposal in a tenure and the next bitcoin block - /// before a subsequent miner isn't allowed to reorg the tenure + /// How much time (in secs) must pass between the first block proposal in a tenure and the next bitcoin block + /// before a subsequent miner isn't allowed to reorg the tenure pub first_proposal_burn_block_timing_secs: Option<u64>, - /// How much time to wait for a miner to propose a block following a sortition in milliseconds + /// How much time (in millisecs) to wait for a miner to propose a block following a sortition pub block_proposal_timeout_ms: Option<u64>, /// An optional custom Chain ID pub chain_id: Option<u32>, + /// Time in seconds to wait for the last block of a tenure to be globally accepted or rejected + /// before considering a new miner's block at the same height as potentially valid.
+ pub tenure_last_block_proposal_timeout_secs: Option<u64>, + /// How long to wait (in millisecs) for a block proposal validation response from the node + /// before marking that block as invalid and rejecting it + pub block_proposal_validation_timeout_ms: Option<u64>, } impl RawConfigFile { @@ -266,6 +285,18 @@ impl TryFrom<RawConfigFile> for GlobalConfig { .unwrap_or(BLOCK_PROPOSAL_TIMEOUT_MS), ); + let tenure_last_block_proposal_timeout = Duration::from_secs( + raw_data + .tenure_last_block_proposal_timeout_secs + .unwrap_or(DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS), + ); + + let block_proposal_validation_timeout = Duration::from_millis( + raw_data + .block_proposal_validation_timeout_ms + .unwrap_or(BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS), + ); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -279,6 +310,8 @@ first_proposal_burn_block_timing, block_proposal_timeout, chain_id: raw_data.chain_id, + tenure_last_block_proposal_timeout, + block_proposal_validation_timeout, }) } } @@ -335,7 +368,7 @@ Metrics endpoint: {metrics_endpoint} /// Get the chain ID for the network pub fn to_chain_id(&self) -> u32 { - self.chain_id.unwrap_or_else(|| match self.network { + self.chain_id.unwrap_or(match self.network { Network::Mainnet => CHAIN_ID_MAINNET, Network::Testnet | Network::Mocknet => CHAIN_ID_TESTNET, }) diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 246015bfb7..244675c65c 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -121,14 +121,11 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner as this could potentially expose sensitive data or functionalities to security risks \ if additional proper security checks are not integrated in place. \ For more information, check the documentation at \ - https://docs.stacks.co/nakamoto-upgrade/signing-and-stacking/faq#what-should-the-networking-setup-for-my-signer-look-like."
+ https://docs.stacks.co/guides-and-tutorials/running-a-signer#preflight-setup" ); let (res_send, res_recv) = channel(); let ev = SignerEventReceiver::new(config.network.is_mainnet()); - #[cfg(feature = "monitoring_prom")] - { - crate::monitoring::start_serving_monitoring_metrics(config.clone()).ok(); - } + crate::monitoring::start_serving_monitoring_metrics(config.clone()).ok(); let runloop = RunLoop::new(config.clone()); let mut signer: RunLoopSigner<S, T> = libsigner::Signer::new(runloop, ev, res_send); let running_signer = signer.spawn(endpoint).expect("Failed to spawn signer"); diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index a23918f6f8..eac60cc53f 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -157,11 +157,7 @@ fn handle_generate_stacking_signature( fn handle_check_config(args: RunSignerArgs) { let config = GlobalConfig::try_from(&args.config).unwrap(); - println!( - "Signer version: {}\nConfig: \n{}", - VERSION_STRING.to_string(), - config - ); + println!("Signer version: {}\nConfig: \n{}", *VERSION_STRING, config); } fn handle_generate_vote(args: GenerateVoteArgs, do_print: bool) -> MessageSignature { diff --git a/stacks-signer/src/monitoring/mod.rs b/stacks-signer/src/monitoring/mod.rs index 621886b9c0..400541d0e7 100644 --- a/stacks-signer/src/monitoring/mod.rs +++ b/stacks-signer/src/monitoring/mod.rs @@ -19,11 +19,11 @@ use ::prometheus::HistogramTimer; #[cfg(feature = "monitoring_prom")] use slog::slog_error; #[cfg(not(feature = "monitoring_prom"))] -use slog::slog_warn; +use slog::slog_info; #[cfg(feature = "monitoring_prom")] use stacks_common::error; #[cfg(not(feature = "monitoring_prom"))] -use stacks_common::warn; +use stacks_common::info; use crate::config::GlobalConfig; @@ -97,8 +97,7 @@ pub fn update_signer_nonce(nonce: u64) { #[allow(dead_code)] /// Remove the origin from the full path to avoid duplicate metrics for different origins fn remove_origin_from_path(full_path: &str, origin: &str) -> String { - let path = full_path.replace(origin, ""); - path + full_path.replace(origin, "") } /// Start a new RPC call timer. @@ -144,7 +143,7 @@ pub fn start_serving_monitoring_metrics(config: GlobalConfig) -> Result<(), Stri #[cfg(not(feature = "monitoring_prom"))] { if config.metrics_endpoint.is_some() { - warn!("Not starting monitoring metrics server as the monitoring_prom feature is not enabled"); + info!("`metrics_endpoint` is configured for the signer, but the monitoring_prom feature is not enabled.
Not starting monitoring metrics server."); } } Ok(()) diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index a0e2b739e9..c8f6041478 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -283,6 +283,8 @@ impl<Signer: SignerTrait<T>, T: StacksMessageCodec + Clone + Send + Debug> RunLoop mainnet: self.config.network.is_mainnet(), db_path: self.config.db_path.clone(), block_proposal_timeout: self.config.block_proposal_timeout, + tenure_last_block_proposal_timeout: self.config.tenure_last_block_proposal_timeout, + block_proposal_validation_timeout: self.config.block_proposal_validation_timeout, })) } diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 06b9d703c3..9fcaa1fa1b 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -24,7 +24,6 @@ use blockstack_lib::util_lib::db::{ Error as DBError, }; use clarity::types::chainstate::{BurnchainHeaderHash, StacksAddress}; -use clarity::util::get_epoch_time_secs; use libsigner::BlockProposal; use rusqlite::{ params, Connection, Error as SqliteError, OpenFlags, OptionalExtension, Transaction, @@ -33,6 +32,7 @@ use serde::{Deserialize, Serialize}; use slog::{slog_debug, slog_error}; use stacks_common::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec}; use stacks_common::types::chainstate::ConsensusHash; +use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, define_u8_enum, error}; @@ -258,6 +258,22 @@ impl BlockInfo { self.state = state; Ok(()) } + + /// Check if the block is globally accepted or rejected + pub fn has_reached_consensus(&self) -> bool { + matches!( + self.state, + BlockState::GloballyAccepted | BlockState::GloballyRejected + ) + } + + /// Check if the block is locally accepted or rejected + pub fn is_locally_finalized(&self) -> bool { + matches!( + self.state, + BlockState::LocallyAccepted | BlockState::LocallyRejected + ) + } } /// This struct manages a SQLite database connection @@ -308,6 +324,11 @@ static CREATE_INDEXES_3: &str = r#" CREATE INDEX IF NOT EXISTS block_rejection_signer_addrs_on_block_signature_hash ON block_rejection_signer_addrs(signer_signature_hash); "#; +static CREATE_INDEXES_4: &str = r#" +CREATE INDEX IF NOT EXISTS blocks_state ON blocks ((json_extract(block_info, '$.state'))); +CREATE INDEX IF NOT EXISTS blocks_signed_group ON blocks ((json_extract(block_info, '$.signed_group'))); +"#; + static CREATE_SIGNER_STATE_TABLE: &str = " CREATE TABLE IF NOT EXISTS signer_states ( reward_cycle INTEGER PRIMARY KEY, @@ -405,9 +426,14 @@ static SCHEMA_3: &[&str] = &[ "INSERT INTO db_config (version) VALUES (3);", ]; +static SCHEMA_4: &[&str] = &[ + CREATE_INDEXES_4, + "INSERT OR REPLACE INTO db_config (version) VALUES (4);", +]; + impl SignerDb { /// The current schema version used in this build of the signer binary. - pub const SCHEMA_VERSION: u32 = 3; + pub const SCHEMA_VERSION: u32 = 4; /// Create a new `SignerState` instance. /// This will create a new SQLite database at the given path @@ -427,7 +453,7 @@ return Ok(0); } let result = conn - .query_row("SELECT version FROM db_config LIMIT 1", [], |row| { + .query_row("SELECT MAX(version) FROM db_config LIMIT 1", [], |row| { row.get(0) }) .optional(); @@ -479,6 +505,20 @@ Ok(()) } + /// Migrate from schema 3 to schema 4 + fn schema_4_migration(tx: &Transaction) -> Result<(), DBError> { + if Self::get_schema_version(tx)?
>= 4 { + // no migration necessary + return Ok(()); + } + + for statement in SCHEMA_4.iter() { + tx.execute_batch(statement)?; + } + + Ok(()) + } + /// Either instantiate a new database, or migrate an existing one /// If the detected version of the existing database is 0 (i.e., a pre-migration /// logic DB), the DB will be dropped. @@ -490,7 +530,8 @@ 0 => Self::schema_1_migration(&sql_tx)?, 1 => Self::schema_2_migration(&sql_tx)?, 2 => Self::schema_3_migration(&sql_tx)?, - 3 => break, + 3 => Self::schema_4_migration(&sql_tx)?, + 4 => break, x => return Err(DBError::Other(format!( "Database schema is newer than supported by this binary. Expected version = {}, Database version = {x}", Self::SCHEMA_VERSION, @@ -600,6 +641,15 @@ try_deserialize(result) } + /// Return the canonical tip -- the last globally accepted block. + pub fn get_canonical_tip(&self) -> Result<Option<BlockInfo>, DBError> { + let query = "SELECT block_info FROM blocks WHERE json_extract(block_info, '$.state') = ?1 ORDER BY stacks_height DESC, json_extract(block_info, '$.signed_group') DESC LIMIT 1"; + let args = params![&BlockState::GloballyAccepted.to_string()]; + let result: Option<String> = query_row(&self.db, query, args)?; + + try_deserialize(result) + } + /// Insert or replace a burn block into the database pub fn insert_burn_block( &mut self, @@ -1226,4 +1276,45 @@ mod tests { assert!(!block.check_state(BlockState::GloballyAccepted)); assert!(block.check_state(BlockState::GloballyRejected)); } + + #[test] + fn test_get_canonical_tip() { + let db_path = tmp_db_path(); + let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); + + let (mut block_info_1, _block_proposal_1) = create_block_override(|b| { + b.block.header.miner_signature = MessageSignature([0x01; 65]); + b.block.header.chain_length = 1; + b.burn_height = 1; + }); + + let (mut block_info_2, _block_proposal_2) = create_block_override(|b| { + b.block.header.miner_signature = MessageSignature([0x02; 65]); + b.block.header.chain_length = 2; + b.burn_height = 2; + }); + + db.insert_block(&block_info_1) + .expect("Unable to insert block into db"); + db.insert_block(&block_info_2) + .expect("Unable to insert block into db"); + + assert!(db.get_canonical_tip().unwrap().is_none()); + + block_info_1 + .mark_globally_accepted() + .expect("Failed to mark block as globally accepted"); + db.insert_block(&block_info_1) + .expect("Unable to insert block into db"); + + assert_eq!(db.get_canonical_tip().unwrap().unwrap(), block_info_1); + + block_info_2 + .mark_globally_accepted() + .expect("Failed to mark block as globally accepted"); + db.insert_block(&block_info_2) + .expect("Unable to insert block into db"); + + assert_eq!(db.get_canonical_tip().unwrap().unwrap(), block_info_2); + } } diff --git a/stacks-signer/src/tests/chainstate.rs b/stacks-signer/src/tests/chainstate.rs index 886480f063..bec9f1258d 100644 --- a/stacks-signer/src/tests/chainstate.rs +++ b/stacks-signer/src/tests/chainstate.rs @@ -89,6 +89,7 @@ fn setup_test_environment( config: ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(30), block_proposal_timeout: Duration::from_secs(5), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }, }; diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 2cb10a9817..b537cfae8a 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -15,11 +15,13 @@ use std::collections::HashMap; use std::fmt::Debug; use std::sync::mpsc::Sender; +use
std::time::{Duration, Instant}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; +use blockstack_lib::util_lib::db::Error as DBError; use clarity::types::chainstate::StacksPrivateKey; use clarity::types::{PrivateKey, StacksEpochId}; use clarity::util::hash::MerkleHashFunc; @@ -85,6 +87,11 @@ pub struct Signer { pub signer_db: SignerDb, /// Configuration for proposal evaluation pub proposal_config: ProposalEvalConfig, + /// How long to wait for a block proposal validation response to arrive before + /// marking a submitted block as invalid + pub block_proposal_validation_timeout: Duration, + /// The current submitted block proposal and its submission time + pub submitted_block_proposal: Option<(BlockProposal, Instant)>, } impl std::fmt::Display for Signer { @@ -127,6 +134,7 @@ impl SignerTrait<SignerMessage> for Signer { if event_parity == Some(other_signer_parity) { return; } + self.check_submitted_block_proposal(); debug!("{self}: Processing event: {event:?}"); let Some(event) = event else { // No event. Do nothing. @@ -191,7 +199,7 @@ "block_height" => b.header.chain_length, "signer_sighash" => %b.header.signer_signature_hash(), ); - stacks_client.post_block_until_ok(self, &b); + stacks_client.post_block_until_ok(self, b); } SignerMessage::MockProposal(mock_proposal) => { let epoch = match stacks_client.get_node_epoch() { @@ -274,6 +282,8 @@ impl From<SignerConfig> for Signer { reward_cycle: signer_config.reward_cycle, signer_db, proposal_config, + submitted_block_proposal: None, + block_proposal_validation_timeout: signer_config.block_proposal_validation_timeout, } } } @@ -339,17 +349,23 @@ impl Signer { }; // Submit a proposal response to the .signers contract for miners debug!("{self}: Broadcasting a block response to stacks node: {block_response:?}"); - if let Err(e) = self + let accepted = matches!(block_response, BlockResponse::Accepted(..)); + match self .stackerdb .send_message_with_retry::<SignerMessage>(block_response.into()) { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + Ok(_) => { + crate::monitoring::increment_block_responses_sent(accepted); + } + Err(e) => { + warn!("{self}: Failed to send block response to stacker-db: {e:?}",); + } } return; } info!( - "{self}: received a block proposal for a new block. Submit block for validation.
"; + "{self}: received a block proposal for a new block."; "signer_sighash" => %signer_signature_hash, "block_id" => %block_proposal.block.block_id(), "block_height" => block_proposal.block.header.chain_length, @@ -428,30 +444,8 @@ impl Signer { }; #[cfg(any(test, feature = "testing"))] - let block_response = match &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() { - Some(public_keys) => { - if public_keys.contains( - &stacks_common::types::chainstate::StacksPublicKey::from_private( - &self.private_key, - ), - ) { - warn!("{self}: Rejecting block proposal automatically due to testing directive"; - "block_id" => %block_proposal.block.block_id(), - "height" => block_proposal.block.header.chain_length, - "consensus_hash" => %block_proposal.block.header.consensus_hash - ); - Some(BlockResponse::rejected( - block_proposal.block.header.signer_signature_hash(), - RejectCode::TestingDirective, - &self.private_key, - self.mainnet, - )) - } else { - None - } - } - None => block_response, - }; + let block_response = + self.test_reject_block_proposal(block_proposal, &mut block_info, block_response); if let Some(block_response) = block_response { // We know proposal is invalid. Send rejection message, do not do further validation @@ -472,17 +466,38 @@ impl Signer { Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), } } else { - // We don't know if proposal is valid, submit to stacks-node for further checks and store it locally. - // Do not store invalid blocks as this could DOS the signer. We only store blocks that are valid or unknown. - stacks_client - .submit_block_for_validation(block_info.block.clone()) - .unwrap_or_else(|e| { - warn!("{self}: Failed to submit block for validation: {e:?}"); - }); + // Just in case check if the last block validation submission timed out. + self.check_submitted_block_proposal(); + if self.submitted_block_proposal.is_none() { + // We don't know if proposal is valid, submit to stacks-node for further checks and store it locally. + info!( + "{self}: submitting block proposal for validation"; + "signer_sighash" => %signer_signature_hash, + "block_id" => %block_proposal.block.block_id(), + "block_height" => block_proposal.block.header.chain_length, + "burn_height" => block_proposal.burn_height, + ); + match stacks_client.submit_block_for_validation(block_info.block.clone()) { + Ok(_) => { + self.submitted_block_proposal = + Some((block_proposal.clone(), Instant::now())); + } + Err(e) => { + warn!("{self}: Failed to submit block for validation: {e:?}"); + } + }; + } else { + // Still store the block but log we can't submit it for validation. We may receive enough signatures/rejections + // from other signers to push the proposed block into a global rejection/acceptance regardless of our participation. + // However, we will not be able to participate beyond this until our block submission times out or we receive a response + // from our node. + warn!("{self}: cannot submit block proposal for validation as we are already waiting for a response for a prior submission") + } + // Do not store KNOWN invalid blocks as this could DOS the signer. We only store blocks that are valid or unknown. 
self.signer_db .insert_block(&block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + .unwrap_or_else(|e| self.handle_insert_block_error(e)); } } @@ -509,20 +524,27 @@ ) -> Option<BlockResponse> { crate::monitoring::increment_block_validation_responses(true); let signer_signature_hash = block_validate_ok.signer_signature_hash; + if self + .submitted_block_proposal + .as_ref() + .map(|(proposal, _)| { + proposal.block.header.signer_signature_hash() == signer_signature_hash + }) + .unwrap_or(false) + { + self.submitted_block_proposal = None; + } // For mutability reasons, we need to take the block_info out of the map and add it back after processing let mut block_info = match self .signer_db .block_lookup(self.reward_cycle, &signer_signature_hash) { Ok(Some(block_info)) => { - if block_info.state == BlockState::GloballyRejected - || block_info.state == BlockState::GloballyAccepted - { + if block_info.is_locally_finalized() { debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); return None; - } else { - block_info } + block_info } Ok(None) => { // We have not seen this block before. Why are we getting a response for it? @@ -535,8 +557,11 @@ } }; if let Err(e) = block_info.mark_locally_accepted(false) { - warn!("{self}: Failed to mark block as locally accepted: {e:?}",); - return None; + if !block_info.has_reached_consensus() { + warn!("{self}: Failed to mark block as locally accepted: {e:?}",); + return None; + } + block_info.signed_self.get_or_insert(get_epoch_time_secs()); } let signature = self .private_key @@ -545,7 +570,7 @@ self.signer_db .insert_block(&block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + .unwrap_or_else(|e| self.handle_insert_block_error(e)); let accepted = BlockAccepted::new(block_info.signer_signature_hash(), signature); // have to save the signature _after_ the block info self.handle_block_signature(stacks_client, &accepted); @@ -559,11 +584,27 @@ ) -> Option<BlockResponse> { crate::monitoring::increment_block_validation_responses(false); let signer_signature_hash = block_validate_reject.signer_signature_hash; + if self + .submitted_block_proposal + .as_ref() + .map(|(proposal, _)| { + proposal.block.header.signer_signature_hash() == signer_signature_hash + }) + .unwrap_or(false) + { + self.submitted_block_proposal = None; + } let mut block_info = match self .signer_db .block_lookup(self.reward_cycle, &signer_signature_hash) { - Ok(Some(block_info)) => block_info, + Ok(Some(block_info)) => { + if block_info.is_locally_finalized() { + debug!("{self}: Received block validation for a block that is already marked as {}. Ignoring...", block_info.state); + return None; + } + block_info + } Ok(None) => { // We have not seen this block before. Why are we getting a response for it? debug!("{self}: Received a block validate response for a block we have not seen before.
Ignoring..."); @@ -575,8 +616,10 @@ impl Signer { } }; if let Err(e) = block_info.mark_locally_rejected() { - warn!("{self}: Failed to mark block as locally rejected: {e:?}",); - return None; + if !block_info.has_reached_consensus() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + return None; + } } let block_rejection = BlockRejection::from_validate_rejection( block_validate_reject.clone(), @@ -585,7 +628,7 @@ ); self.signer_db .insert_block(&block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); + .unwrap_or_else(|e| self.handle_insert_block_error(e)); self.handle_block_rejection(&block_rejection); Some(BlockResponse::Rejected(block_rejection)) } @@ -612,12 +655,12 @@ info!( "{self}: Broadcasting a block response to stacks node: {response:?}"; ); + let accepted = matches!(response, BlockResponse::Accepted(..)); match self .stackerdb - .send_message_with_retry::(response.clone().into()) + .send_message_with_retry::(response.into()) { Ok(_) => { - let accepted = matches!(response, BlockResponse::Accepted(..)); crate::monitoring::increment_block_responses_sent(accepted); } Err(e) => { @@ -626,6 +669,81 @@ } } + /// Check the current tracked submitted block proposal to see if it has timed out. + /// Broadcasts a rejection and marks the block locally rejected if it has. + fn check_submitted_block_proposal(&mut self) { + let Some((block_proposal, block_submission)) = self.submitted_block_proposal.take() else { + // Nothing to check. + return; + }; + if block_submission.elapsed() < self.block_proposal_validation_timeout { + // Not expired yet. Put it back! + self.submitted_block_proposal = Some((block_proposal, block_submission)); + return; + } + let signature_sighash = block_proposal.block.header.signer_signature_hash(); + // For mutability reasons, we need to take the block_info out of the map and add it back after processing + let mut block_info = match self + .signer_db + .block_lookup(self.reward_cycle, &signature_sighash) + { + Ok(Some(block_info)) => { + if block_info.state == BlockState::GloballyRejected + || block_info.state == BlockState::GloballyAccepted + { + // The block has already reached consensus. + return; + } + block_info + } + Ok(None) => { + // This is weird. If this is reached, it's probably an error in code logic or the db was flushed. + // Why are we tracking a block submission for a block we have never seen / stored before? + error!("{self}: tracking an unknown block validation submission."; + "signer_sighash" => %signature_sighash, + "block_id" => %block_proposal.block.block_id(), + ); + return; + } + Err(e) => { + error!("{self}: Failed to lookup block in signer db: {e:?}",); + return; + } + }; + // We cannot determine the validity of the block, but we have not reached consensus on it yet. + // Reject it so we aren't holding up the network because of our inaction. + warn!( + "{self}: Failed to receive block validation response within {} ms.
Rejecting block.", self.block_proposal_validation_timeout.as_millis(); + "signer_sighash" => %signature_sighash, + "block_id" => %block_proposal.block.block_id(), + ); + let rejection = BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::ConnectivityIssues, + &self.private_key, + self.mainnet, + ); + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + }; + debug!("{self}: Broadcasting a block response to stacks node: {rejection:?}"); + let res = self + .stackerdb + .send_message_with_retry::(rejection.into()); + + match res { + Err(e) => warn!("{self}: Failed to send block rejection to stacker-db: {e:?}"), + Ok(ack) if !ack.accepted => warn!( + "{self}: Block rejection not accepted by stacker-db: {:?}", + ack.reason + ), + Ok(_) => debug!("{self}: Block rejection accepted by stacker-db"), + } + self.signer_db + .insert_block(&block_info) + .unwrap_or_else(|e| self.handle_insert_block_error(e)); + } + /// Compute the signing weight, given a list of signatures fn compute_signature_signing_weight<'a>( &self, @@ -732,6 +850,15 @@ impl Signer { error!("{self}: Failed to update block state: {e:?}",); panic!("{self} Failed to update block state: {e}"); } + if self + .submitted_block_proposal + .as_ref() + .map(|(proposal, _)| &proposal.block.header.signer_signature_hash() == block_hash) + .unwrap_or(false) + { + // Consensus reached! No longer bother tracking its validation submission to the node as we are too late to participate in the decision anyway. + self.submitted_block_proposal = None; + } } /// Handle an observed signature from another signer @@ -874,6 +1001,15 @@ } } self.broadcast_signed_block(stacks_client, block_info.block, &addrs_to_sigs); + if self + .submitted_block_proposal + .as_ref() + .map(|(proposal, _)| &proposal.block.header.signer_signature_hash() == block_hash) + .unwrap_or(false) + { + // Consensus reached! No longer bother tracking its validation submission to the node as we are too late to participate in the decision anyway. + self.submitted_block_proposal = None; + } } fn broadcast_signed_block( @@ -935,6 +1071,44 @@ false } + #[cfg(any(test, feature = "testing"))] + fn test_reject_block_proposal( + &mut self, + block_proposal: &BlockProposal, + block_info: &mut BlockInfo, + block_response: Option, + ) -> Option { + let Some(public_keys) = &*TEST_REJECT_ALL_BLOCK_PROPOSAL.lock().unwrap() else { + return block_response; + }; + if public_keys.contains( + &stacks_common::types::chainstate::StacksPublicKey::from_private(&self.private_key), + ) { + warn!("{self}: Rejecting block proposal automatically due to testing directive"; + "block_id" => %block_proposal.block.block_id(), + "height" => block_proposal.block.header.chain_length, + "consensus_hash" => %block_proposal.block.header.consensus_hash + ); + if let Err(e) = block_info.mark_locally_rejected() { + warn!("{self}: Failed to mark block as locally rejected: {e:?}",); + }; + // We must insert the block into the DB to prevent subsequent repeat proposals from being accepted (should reject + // as invalid since we rejected in a prior round if this crops up again) + // in case this is the first time we saw this block. Safe to do since this is a testing case only.
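The new `submitted_block_proposal` plumbing in this file reduces to three moves: submit only when nothing is in flight, clear the slot when a response or global consensus arrives for the tracked hash, and, as in `check_submitted_block_proposal` above, take the entry out and restore it only if its timeout has not elapsed. A self-contained sketch of that bookkeeping with stand-in types (a `u64` id in place of the real proposal; not the signer's actual API):

```rust
use std::time::{Duration, Instant};

/// Illustrative stand-in for the signer's single-slot submission tracker.
struct ValidationTracker {
    /// At most one submission is in flight, tagged with when it was sent.
    submitted: Option<(u64, Instant)>,
    timeout: Duration,
}

impl ValidationTracker {
    /// Submit only if nothing is already awaiting a node response.
    fn try_submit(&mut self, id: u64) -> bool {
        if self.submitted.is_some() {
            return false; // still waiting on a prior submission
        }
        self.submitted = Some((id, Instant::now()));
        true
    }

    /// Clear the slot once a response (or global consensus) arrives
    /// for the proposal being tracked.
    fn on_resolved(&mut self, id: u64) {
        if self.submitted.as_ref().map(|(t, _)| *t == id).unwrap_or(false) {
            self.submitted = None;
        }
    }

    /// Take-then-restore: returns the id of a timed-out submission and
    /// clears the slot, or restores the entry if it is still pending.
    fn take_if_expired(&mut self) -> Option<u64> {
        let (id, sent_at) = self.submitted.take()?;
        if sent_at.elapsed() < self.timeout {
            self.submitted = Some((id, sent_at)); // not expired: put it back
            return None;
        }
        Some(id) // expired: caller broadcasts a rejection for `id`
    }
}

fn main() {
    let mut t = ValidationTracker { submitted: None, timeout: Duration::ZERO };
    assert!(t.try_submit(1));
    assert!(!t.try_submit(2)); // second proposal must wait
    assert_eq!(t.take_if_expired(), Some(1)); // zero timeout expires at once
    assert!(t.try_submit(2));
    t.on_resolved(2);
    assert!(t.submitted.is_none());
}
```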
+ self.signer_db + .insert_block(block_info) + .unwrap_or_else(|e| self.handle_insert_block_error(e)); + Some(BlockResponse::rejected( + block_proposal.block.header.signer_signature_hash(), + RejectCode::TestingDirective, + &self.private_key, + self.mainnet, + )) + } else { + None + } + } + /// Send a mock signature to stackerdb to prove we are still alive fn mock_sign(&mut self, mock_proposal: MockProposal) { info!("{self}: Mock signing mock proposal: {mock_proposal:?}"); @@ -947,4 +1121,10 @@ impl Signer { warn!("{self}: Failed to send mock signature to stacker-db: {e:?}",); } } + + /// Helper for logging insert_block error + fn handle_insert_block_error(&self, e: DBError) { + error!("{self}: Failed to insert block into signer-db: {e:?}"); + panic!("{self} Failed to write block to signerdb: {e}"); + } } diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 40cabd86d3..83c8903d35 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -45,7 +45,7 @@ use crate::burnchains::{ Burnchain, BurnchainBlockHeader, Error as burnchain_error, MagicBytes, BLOCKSTACK_MAGIC_MAINNET, }; use crate::core::{ - StacksEpoch, StacksEpochExtension, STACKS_EPOCHS_MAINNET, STACKS_EPOCHS_REGTEST, + EpochList, StacksEpoch, StacksEpochExtension, STACKS_EPOCHS_MAINNET, STACKS_EPOCHS_REGTEST, STACKS_EPOCHS_TESTNET, }; use crate::util_lib::db::Error as DBError; @@ -91,11 +91,11 @@ impl TryFrom for BitcoinNetworkType { /// Get the default epochs definitions for the given BitcoinNetworkType. /// Should *not* be used except by the BitcoinIndexer when no epochs vector /// was specified. -pub fn get_bitcoin_stacks_epochs(network_id: BitcoinNetworkType) -> Vec { +pub fn get_bitcoin_stacks_epochs(network_id: BitcoinNetworkType) -> EpochList { match network_id { - BitcoinNetworkType::Mainnet => STACKS_EPOCHS_MAINNET.to_vec(), - BitcoinNetworkType::Testnet => STACKS_EPOCHS_TESTNET.to_vec(), - BitcoinNetworkType::Regtest => STACKS_EPOCHS_REGTEST.to_vec(), + BitcoinNetworkType::Mainnet => (*STACKS_EPOCHS_MAINNET).clone(), + BitcoinNetworkType::Testnet => (*STACKS_EPOCHS_TESTNET).clone(), + BitcoinNetworkType::Regtest => (*STACKS_EPOCHS_REGTEST).clone(), } } @@ -112,7 +112,7 @@ pub struct BitcoinIndexerConfig { pub spv_headers_path: String, pub first_block: u64, pub magic_bytes: MagicBytes, - pub epochs: Option>, + pub epochs: Option, } #[derive(Debug)] @@ -1041,7 +1041,7 @@ impl BurnchainIndexer for BitcoinIndexer { /// 2) Use hard-coded static values, otherwise. /// /// It is an error (panic) to set custom epochs if running on `Mainnet`. 
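The `Vec<StacksEpoch>` → `EpochList` migration that starts here pays off later in this diff, where tests index epochs by identifier (`bad_epochs[StacksEpochId::Epoch25].end_height += 1`) instead of by fragile vector position. The real `EpochList` lives in stacks-common and is not shown in this patch; a minimal sketch of the kind of `Index`/`IndexMut` impls that enable such lookups might be:

```rust
use std::ops::{Index, IndexMut};

#[derive(Clone, Copy, PartialEq, Debug)]
enum EpochId {
    Epoch25,
    Epoch30,
}

struct Epoch {
    id: EpochId,
    start_height: u64,
    end_height: u64,
}

struct EpochList(Vec<Epoch>);

impl Index<EpochId> for EpochList {
    type Output = Epoch;
    fn index(&self, id: EpochId) -> &Epoch {
        self.0
            .iter()
            .find(|e| e.id == id)
            .unwrap_or_else(|| panic!("no epoch {id:?} in list"))
    }
}

impl IndexMut<EpochId> for EpochList {
    fn index_mut(&mut self, id: EpochId) -> &mut Epoch {
        self.0
            .iter_mut()
            .find(|e| e.id == id)
            .unwrap_or_else(|| panic!("no epoch {id:?} in list"))
    }
}

fn main() {
    let mut epochs = EpochList(vec![
        Epoch { id: EpochId::Epoch25, start_height: 0, end_height: 100 },
        Epoch { id: EpochId::Epoch30, start_height: 100, end_height: u64::MAX },
    ]);
    // Shift the 2.5 -> 3.0 boundary by id, not by position.
    epochs[EpochId::Epoch25].end_height += 1;
    epochs[EpochId::Epoch30].start_height += 1;
    assert_eq!(epochs[EpochId::Epoch25].end_height, 101);
}
```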
- fn get_stacks_epochs(&self) -> Vec { + fn get_stacks_epochs(&self) -> EpochList { StacksEpoch::get_epochs(self.runtime.network_id, self.config.epochs.as_ref()) } diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index a5ecaa0458..84a45eb278 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -29,6 +29,7 @@ use stacks_common::util::hash::to_hex; use stacks_common::util::vrf::VRFPublicKey; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log, sleep_ms}; +use super::EpochList; use crate::burnchains::affirmation::update_pox_affirmation_maps; use crate::burnchains::bitcoin::address::{ to_c32_version_byte, BitcoinAddress, LegacyBitcoinAddressType, @@ -702,6 +703,10 @@ impl Burnchain { } pub fn get_burnchaindb_path(&self) -> String { + if self.working_dir.as_str() == ":memory:" { + return ":memory:".to_string(); + } + let chainstate_dir = Burnchain::get_chainstate_path_str(&self.working_dir); let mut db_pathbuf = PathBuf::from(&chainstate_dir); db_pathbuf.push("burnchain.sqlite"); @@ -718,7 +723,7 @@ impl Burnchain { readwrite: bool, first_block_header_hash: BurnchainHeaderHash, first_block_header_timestamp: u64, - epochs: Vec, + epochs: EpochList, ) -> Result<(SortitionDB, BurnchainDB), burnchain_error> { Burnchain::setup_chainstate_dirs(&self.working_dir)?; @@ -743,12 +748,14 @@ impl Burnchain { /// Open just the burnchain database pub fn open_burnchain_db(&self, readwrite: bool) -> Result { let burnchain_db_path = self.get_burnchaindb_path(); - if let Err(e) = fs::metadata(&burnchain_db_path) { - warn!( - "Failed to stat burnchain DB path '{}': {:?}", - &burnchain_db_path, &e - ); - return Err(burnchain_error::DBError(db_error::NoDBError)); + if burnchain_db_path != ":memory:" { + if let Err(e) = fs::metadata(&burnchain_db_path) { + warn!( + "Failed to stat burnchain DB path '{}': {:?}", + &burnchain_db_path, &e + ); + return Err(burnchain_error::DBError(db_error::NoDBError)); + } } test_debug!( "Open burnchain DB at {} (rw? 
{})", diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 72ca2e8bf1..d5f1e18804 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -1000,33 +1000,38 @@ impl BurnchainDB { readwrite: bool, ) -> Result { let mut create_flag = false; - let open_flags = match fs::metadata(path) { - Err(e) => { - if e.kind() == io::ErrorKind::NotFound { - // need to create - if readwrite { - create_flag = true; - let ppath = Path::new(path); - let pparent_path = ppath - .parent() - .unwrap_or_else(|| panic!("BUG: no parent of '{}'", path)); - fs::create_dir_all(&pparent_path) - .map_err(|e| BurnchainError::from(DBError::IOError(e)))?; - - OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE + let open_flags = if path == ":memory:" { + create_flag = true; + OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE + } else { + match fs::metadata(path) { + Err(e) => { + if e.kind() == io::ErrorKind::NotFound { + // need to create + if readwrite { + create_flag = true; + let ppath = Path::new(path); + let pparent_path = ppath + .parent() + .unwrap_or_else(|| panic!("BUG: no parent of '{}'", path)); + fs::create_dir_all(&pparent_path) + .map_err(|e| BurnchainError::from(DBError::IOError(e)))?; + + OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE + } else { + return Err(BurnchainError::from(DBError::NoDBError)); + } } else { - return Err(BurnchainError::from(DBError::NoDBError)); + return Err(BurnchainError::from(DBError::IOError(e))); } - } else { - return Err(BurnchainError::from(DBError::IOError(e))); } - } - Ok(_md) => { - // can just open - if readwrite { - OpenFlags::SQLITE_OPEN_READ_WRITE - } else { - OpenFlags::SQLITE_OPEN_READ_ONLY + Ok(_md) => { + // can just open + if readwrite { + OpenFlags::SQLITE_OPEN_READ_WRITE + } else { + OpenFlags::SQLITE_OPEN_READ_ONLY + } } } }; @@ -1089,7 +1094,7 @@ impl BurnchainDB { let conn = sqlite_open(path, open_flags, true)?; let mut db = BurnchainDB { conn }; - if readwrite { + if readwrite || path == ":memory:" { db.add_indexes()?; } Ok(db) diff --git a/stackslib/src/burnchains/indexer.rs b/stackslib/src/burnchains/indexer.rs index 5d8eef99a6..ebca4730df 100644 --- a/stackslib/src/burnchains/indexer.rs +++ b/stackslib/src/burnchains/indexer.rs @@ -62,7 +62,7 @@ pub trait BurnchainIndexer { fn get_first_block_height(&self) -> u64; fn get_first_block_header_hash(&self) -> Result; fn get_first_block_header_timestamp(&self) -> Result; - fn get_stacks_epochs(&self) -> Vec; + fn get_stacks_epochs(&self) -> EpochList; fn get_headers_path(&self) -> String; fn get_headers_height(&self) -> Result; diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 0bc68897cb..3e153df53b 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -450,6 +450,7 @@ impl PoxConstants { ) } + // NOTE: this is the *old* pre-Nakamoto testnet pub fn testnet_default() -> PoxConstants { PoxConstants::new( POX_REWARD_CYCLE_LENGTH / 2, // 1050 @@ -468,6 +469,10 @@ impl PoxConstants { ) // total liquid supply is 40000000000000000 µSTX } + pub fn nakamoto_testnet_default() -> PoxConstants { + PoxConstants::new(900, 100, 51, 100, 0, u64::MAX, u64::MAX, 242, 243, 246, 244) + } + // TODO: add tests from mutation testing results #4838 #[cfg_attr(test, mutants::skip)] pub fn regtest_default() -> PoxConstants { diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index e7fa51a89c..887b56861b 100644 --- 
a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -351,10 +351,30 @@ impl TestMinerFactory { impl TestBurnchainBlock { pub fn new(parent_snapshot: &BlockSnapshot, fork_id: u64) -> TestBurnchainBlock { + let burn_header_hash = BurnchainHeaderHash::from_test_data( + parent_snapshot.block_height + 1, + &parent_snapshot.index_root, + fork_id, + ); TestBurnchainBlock { parent_snapshot: parent_snapshot.clone(), block_height: parent_snapshot.block_height + 1, - txs: vec![], + txs: vec![ + // make sure that no block-commit gets vtxindex == 0 unless explicitly structured. + // This prestx mocks a burnchain coinbase + BlockstackOperationType::PreStx(PreStxOp { + output: StacksAddress::burn_address(false), + txid: Txid::from_test_data( + parent_snapshot.block_height + 1, + 0, + &burn_header_hash, + 128, + ), + vtxindex: 0, + block_height: parent_snapshot.block_height + 1, + burn_header_hash, + }), + ], fork_id: fork_id, timestamp: get_epoch_time_secs(), } @@ -397,6 +417,7 @@ impl TestBurnchainBlock { parent_block_snapshot: Option<&BlockSnapshot>, new_seed: Option, epoch_marker: u8, + parent_is_shadow: bool, ) -> LeaderBlockCommitOp { let pubks = miner .privks @@ -435,6 +456,13 @@ impl TestBurnchainBlock { ) .expect("FATAL: failed to read block commit"); + if parent_is_shadow { + assert!( + get_commit_res.is_none(), + "FATAL: shadow parent should not have a block-commit" + ); + } + let input = SortitionDB::get_last_block_commit_by_sender(ic.conn(), &apparent_sender) .unwrap() .map(|commit| (commit.txid.clone(), 1 + (commit.commit_outs.len() as u32))) @@ -454,7 +482,8 @@ impl TestBurnchainBlock { block_hash, self.block_height, &new_seed, - &parent, + parent.block_height as u32, + parent.vtxindex as u16, leader_key.block_height as u32, leader_key.vtxindex as u16, burn_fee, @@ -464,16 +493,42 @@ impl TestBurnchainBlock { txop } None => { - // initial - let txop = LeaderBlockCommitOp::initial( - block_hash, - self.block_height, - &new_seed, - leader_key, - burn_fee, - &input, - &apparent_sender, - ); + let txop = if parent_is_shadow { + test_debug!( + "Block-commit for {} (burn height {}) builds on shadow sortition", + block_hash, + self.block_height + ); + + LeaderBlockCommitOp::new( + block_hash, + self.block_height, + &new_seed, + last_snapshot_with_sortition.block_height as u32, + 0, + leader_key.block_height as u32, + leader_key.vtxindex as u16, + burn_fee, + &input, + &apparent_sender, + ) + } else { + // initial + test_debug!( + "Block-commit for {} (burn height {}) builds on genesis", + block_hash, + self.block_height, + ); + LeaderBlockCommitOp::initial( + block_hash, + self.block_height, + &new_seed, + leader_key, + burn_fee, + &input, + &apparent_sender, + ) + }; txop } }; @@ -517,6 +572,7 @@ impl TestBurnchainBlock { parent_block_snapshot, None, STACKS_EPOCH_2_4_MARKER, + false, ) } diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 53dc2d0547..dd543ac7f7 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2969,9 +2969,9 @@ impl SortitionDB { db_tx: &Transaction, epochs: &[StacksEpoch], ) -> Result<(), db_error> { - let epochs = StacksEpoch::validate_epochs(epochs); + let epochs: &[StacksEpoch] = &StacksEpoch::validate_epochs(epochs); let existing_epochs = Self::get_stacks_epochs(db_tx)?; - if existing_epochs == epochs { + if &existing_epochs == epochs { return Ok(()); } @@ -3482,9 +3482,10 @@ impl SortitionDB { tx.commit()?; } else if version == 
expected_version { // this transaction is almost never needed - let validated_epochs = StacksEpoch::validate_epochs(epochs); + let validated_epochs: &[StacksEpoch] = + &StacksEpoch::validate_epochs(epochs); let existing_epochs = Self::get_stacks_epochs(self.conn())?; - if existing_epochs == validated_epochs { + if &existing_epochs == validated_epochs { return Ok(()); } @@ -3689,6 +3690,12 @@ impl SortitionDB { .try_into() .ok() } + + /// Get the Stacks block ID for the canonical tip. + pub fn get_canonical_stacks_tip_block_id(&self) -> StacksBlockId { + let (ch, bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.conn()).unwrap(); + StacksBlockId::new(&ch, &bh) + } } impl<'a> SortitionDBTx<'a> { @@ -6636,7 +6643,7 @@ pub mod tests { pub fn connect_test_with_epochs( first_block_height: u64, first_burn_hash: &BurnchainHeaderHash, - epochs: Vec, + epochs: EpochList, ) -> Result { let mut rng = rand::thread_rng(); let mut buf = [0u8; 32]; @@ -10930,10 +10937,9 @@ pub mod tests { fs::create_dir_all(path_root).unwrap(); - let mut bad_epochs = STACKS_EPOCHS_MAINNET.to_vec(); - let idx = bad_epochs.len() - 2; - bad_epochs[idx].end_height += 1; - bad_epochs[idx + 1].start_height += 1; + let mut bad_epochs = (*STACKS_EPOCHS_MAINNET).clone(); + bad_epochs[StacksEpochId::Epoch25].end_height += 1; + bad_epochs[StacksEpochId::Epoch30].start_height += 1; let sortdb = SortitionDB::connect( &format!("{}/sortdb.sqlite", &path_root), @@ -10948,14 +10954,14 @@ pub mod tests { .unwrap(); let db_epochs = SortitionDB::get_stacks_epochs(sortdb.conn()).unwrap(); - assert_eq!(db_epochs, bad_epochs); + assert_eq!(db_epochs, bad_epochs.to_vec()); let fixed_sortdb = SortitionDB::connect( &format!("{}/sortdb.sqlite", &path_root), 0, &BurnchainHeaderHash([0x00; 32]), 0, - &STACKS_EPOCHS_MAINNET.to_vec(), + &STACKS_EPOCHS_MAINNET, PoxConstants::mainnet_default(), None, true, diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 910315f082..c3a378ddf6 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -136,7 +136,8 @@ impl LeaderBlockCommitOp { block_header_hash: &BlockHeaderHash, block_height: u64, new_seed: &VRFSeed, - parent: &LeaderBlockCommitOp, + parent_block_height: u32, + parent_vtxindex: u16, key_block_ptr: u32, key_vtxindex: u16, burn_fee: u64, @@ -148,8 +149,8 @@ impl LeaderBlockCommitOp { new_seed: new_seed.clone(), key_block_ptr: key_block_ptr, key_vtxindex: key_vtxindex, - parent_block_ptr: parent.block_height as u32, - parent_vtxindex: parent.vtxindex as u16, + parent_block_ptr: parent_block_height, + parent_vtxindex: parent_vtxindex, memo: vec![], burn_fee: burn_fee, input: input.clone(), @@ -696,8 +697,19 @@ impl LeaderBlockCommitOp { // is descendant let directly_descended_from_anchor = epoch_id.block_commits_to_parent() && self.block_header_hash == reward_set_info.anchor_block; - let descended_from_anchor = directly_descended_from_anchor || tx - .descended_from(parent_block_height, &reward_set_info.anchor_block) + + // second, if we're in a nakamoto epoch, and the parent block has vtxindex 0 (i.e. the + // coinbase of the burnchain block), then assume that this block descends from the anchor + // block for the purposes of validating its PoX payouts. 
The block validation logic will + // check that the parent block is indeed a shadow block, and that `self.parent_block_ptr` + // points to the shadow block's tenure's burnchain block. + let maybe_shadow_parent = epoch_id.supports_shadow_blocks() + && self.parent_block_ptr != 0 + && self.parent_vtxindex == 0; + + let descended_from_anchor = directly_descended_from_anchor + || maybe_shadow_parent + || tx.descended_from(parent_block_height, &reward_set_info.anchor_block) .map_err(|e| { error!("Failed to check whether parent (height={}) is descendent of anchor block={}: {}", parent_block_height, &reward_set_info.anchor_block, e); @@ -1031,10 +1043,12 @@ impl LeaderBlockCommitOp { return Err(op_error::BlockCommitNoParent); } else if self.parent_block_ptr != 0 || self.parent_vtxindex != 0 { // not building off of genesis, so the parent block must exist + // unless the parent is a shadow block let has_parent = tx .get_block_commit_parent(parent_block_height, self.parent_vtxindex.into(), &tx_tip)? .is_some(); - if !has_parent { + let maybe_shadow_block = self.parent_vtxindex == 0 && epoch_id.supports_shadow_blocks(); + if !has_parent && !maybe_shadow_block { warn!("Invalid block commit: no parent block in this fork"; "apparent_sender" => %apparent_sender_repr ); diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index d566113fad..f203ea5e28 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -290,7 +290,7 @@ pub fn setup_states_with_epochs( pox_consts: Option, initial_balances: Option>, stacks_epoch_id: StacksEpochId, - epochs_opt: Option>, + epochs_opt: Option, ) { let mut burn_block = None; let mut others = vec![]; diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index cb1966d806..fc7c8ba504 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -58,6 +58,9 @@ use crate::monitoring::increment_stx_blocks_processed_counter; use crate::net::Error as NetError; use crate::util_lib::db::Error as DBError; +#[cfg(any(test, feature = "testing"))] +pub static TEST_COORDINATOR_STALL: std::sync::Mutex> = std::sync::Mutex::new(None); + #[cfg(test)] pub mod tests; @@ -484,7 +487,14 @@ pub fn load_nakamoto_reward_set( let Some(anchor_block_header) = prepare_phase_sortitions .into_iter() .find_map(|sn| { - if !sn.sortition { + let shadow_tenure = match chain_state.nakamoto_blocks_db().is_shadow_tenure(&sn.consensus_hash) { + Ok(x) => x, + Err(e) => { + return Some(Err(e)); + } + }; + + if !sn.sortition && !shadow_tenure { return None } @@ -757,6 +767,21 @@ impl< true } + #[cfg(any(test, feature = "testing"))] + fn fault_injection_pause_nakamoto_block_processing() { + if *TEST_COORDINATOR_STALL.lock().unwrap() == Some(true) { + // Do an extra check just so we don't log EVERY time. + warn!("Coordinator is stalled due to testing directive"); + while *TEST_COORDINATOR_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + warn!("Coordinator is no longer stalled due to testing directive. Continuing..."); + } + } + + #[cfg(not(any(test, feature = "testing")))] + fn fault_injection_pause_nakamoto_block_processing() {} + /// Handle one or more new Nakamoto Stacks blocks. /// If we process a PoX anchor block, then return its block hash. This unblocks processing the /// next reward cycle's burnchain blocks. 
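The `maybe_shadow_parent` test in the leader_block_commit.rs hunks above rests on one invariant: `vtxindex == 0` in a burnchain block is the coinbase slot, so no ordinary block-commit can live there; a commit naming `parent_vtxindex == 0` with a nonzero `parent_block_ptr` can therefore only be pointing at a shadow tenure, and only epochs that support shadow blocks accept it. Distilled into a standalone predicate (field names mirror the diff; the epoch check is a plain bool here):

```rust
/// The two parent-pointer fields of a block-commit, as in the diff.
struct CommitParentPtr {
    parent_block_ptr: u32,
    parent_vtxindex: u16,
}

/// True when the pointed-at parent can only be a shadow tenure:
/// vtxindex 0 is the burnchain coinbase, never a real block-commit,
/// and a zero block ptr would instead mean "builds on genesis".
fn maybe_shadow_parent(ptr: &CommitParentPtr, epoch_supports_shadow_blocks: bool) -> bool {
    epoch_supports_shadow_blocks && ptr.parent_block_ptr != 0 && ptr.parent_vtxindex == 0
}

fn main() {
    let shadowish = CommitParentPtr { parent_block_ptr: 840_000, parent_vtxindex: 0 };
    let genesis = CommitParentPtr { parent_block_ptr: 0, parent_vtxindex: 0 };
    assert!(maybe_shadow_parent(&shadowish, true));
    assert!(!maybe_shadow_parent(&shadowish, false)); // pre-shadow epochs
    assert!(!maybe_shadow_parent(&genesis, true)); // genesis build-off
}
```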
Subsequent calls to this function will terminate @@ -769,6 +794,8 @@ impl< ); loop { + Self::fault_injection_pause_nakamoto_block_processing(); + // process at most one block per loop pass let mut processed_block_receipt = match NakamotoChainState::process_next_nakamoto_block( &mut self.chain_state_db, diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 23bf3313e9..0525717981 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -576,7 +576,7 @@ impl<'a> TestPeer<'a> { coinbase_tx: &StacksTransaction, miner_setup: F, after_block: G, - ) -> NakamotoBlock + ) -> Result where F: FnMut(&mut NakamotoBlockBuilder), G: FnMut(&mut NakamotoBlock) -> bool, @@ -606,7 +606,7 @@ impl<'a> TestPeer<'a> { coinbase_tx: &StacksTransaction, miner_setup: F, after_block: G, - ) -> NakamotoBlock + ) -> Result where F: FnMut(&mut NakamotoBlockBuilder), G: FnMut(&mut NakamotoBlock) -> bool, @@ -631,7 +631,7 @@ impl<'a> TestPeer<'a> { sortdb, &sender_key, sender_acct.nonce, - 100, + 200, 1, &recipient_addr, ); @@ -642,10 +642,10 @@ impl<'a> TestPeer<'a> { } }, after_block, - ); + )?; assert_eq!(blocks_and_sizes.len(), 1); let block = blocks_and_sizes.pop().unwrap().0; - block + Ok(block) } pub fn mine_tenure(&mut self, block_builder: F) -> Vec<(NakamotoBlock, u64, ExecutionCost)> @@ -707,15 +707,41 @@ impl<'a> TestPeer<'a> { block_builder, |_| true, ) + .unwrap() } pub fn single_block_tenure( &mut self, sender_key: &StacksPrivateKey, miner_setup: S, - mut after_burn_ops: F, + after_burn_ops: F, after_block: G, ) -> (NakamotoBlock, u64, StacksTransaction, StacksTransaction) + where + S: FnMut(&mut NakamotoBlockBuilder), + F: FnMut(&mut Vec), + G: FnMut(&mut NakamotoBlock) -> bool, + { + self.single_block_tenure_fallible(sender_key, miner_setup, after_burn_ops, after_block) + .unwrap() + } + + /// Produce a single-block tenure, containing a stx-transfer sent from `sender_key`. + /// + /// * `after_burn_ops` is called right after `self.begin_nakamoto_tenure` to modify any burn ops + /// for this tenure + /// + /// * `miner_setup` is called right after the Nakamoto block builder is constructed, but before + /// any txs are mined + /// + /// * `after_block` is called right after the block is assembled, but before it is signed. + pub fn single_block_tenure_fallible( + &mut self, + sender_key: &StacksPrivateKey, + miner_setup: S, + mut after_burn_ops: F, + after_block: G, + ) -> Result<(NakamotoBlock, u64, StacksTransaction, StacksTransaction), ChainstateError> where S: FnMut(&mut NakamotoBlockBuilder), F: FnMut(&mut Vec), @@ -770,9 +796,9 @@ impl<'a> TestPeer<'a> { &coinbase_tx, miner_setup, after_block, - ); + )?; - (block, burn_height, tenure_change_tx, coinbase_tx) + Ok((block, burn_height, tenure_change_tx, coinbase_tx)) } } @@ -1422,24 +1448,27 @@ fn pox_treatment() { // set the bitvec to a heterogenous one: either punish or // reward is acceptable, so this block should just process. - let block = peer.mine_single_block_tenure( - &private_key, - &tenure_change_tx, - &coinbase_tx, - |_| {}, - |block| { - // each stacker has 3 entries in the bitvec. 
- // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 - block.header.pox_treatment = BitVec::try_from( - [ - false, false, true, false, false, true, false, false, true, false, false, true, - ] - .as_slice(), - ) - .unwrap(); - true - }, - ); + let block = peer + .mine_single_block_tenure( + &private_key, + &tenure_change_tx, + &coinbase_tx, + |_| {}, + |block| { + // each stacker has 3 entries in the bitvec. + // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 + block.header.pox_treatment = BitVec::try_from( + [ + false, false, true, false, false, true, false, false, true, false, false, + true, + ] + .as_slice(), + ) + .unwrap(); + true + }, + ) + .unwrap(); blocks.push(block); // now we need to test punishment! @@ -1510,23 +1539,26 @@ fn pox_treatment() { // set the bitvec to a heterogenous one: either punish or // reward is acceptable, so this block should just process. - let block = peer.mine_single_block_tenure( - &private_key, - &tenure_change_tx, - &coinbase_tx, - |miner| { - // each stacker has 3 entries in the bitvec. - // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 - miner.header.pox_treatment = BitVec::try_from( - [ - false, false, true, false, false, true, false, false, true, false, false, true, - ] - .as_slice(), - ) - .unwrap(); - }, - |_block| true, - ); + let block = peer + .mine_single_block_tenure( + &private_key, + &tenure_change_tx, + &coinbase_tx, + |miner| { + // each stacker has 3 entries in the bitvec. + // entries are ordered by PoxAddr, so this makes every entry a 1-of-3 + miner.header.pox_treatment = BitVec::try_from( + [ + false, false, true, false, false, true, false, false, true, false, false, + true, + ] + .as_slice(), + ) + .unwrap(); + }, + |_block| true, + ) + .unwrap(); blocks.push(block); let tip = { @@ -3212,7 +3244,7 @@ fn test_stacks_on_burnchain_ops() { // mocked txid: Txid([i as u8; 32]), - vtxindex: 1, + vtxindex: 11, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x00; 32]), })); @@ -3232,7 +3264,7 @@ fn test_stacks_on_burnchain_ops() { // mocked txid: Txid([(i as u8) | 0x80; 32]), - vtxindex: 2, + vtxindex: 12, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x00; 32]), })); @@ -3244,7 +3276,7 @@ fn test_stacks_on_burnchain_ops() { // mocked txid: Txid([(i as u8) | 0x40; 32]), - vtxindex: 3, + vtxindex: 13, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x00; 32]), })); @@ -3263,7 +3295,7 @@ fn test_stacks_on_burnchain_ops() { // mocked txid: Txid([(i as u8) | 0xc0; 32]), - vtxindex: 4, + vtxindex: 14, block_height: block_height + 1, burn_header_hash: BurnchainHeaderHash([0x00; 32]), }, diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 04401a0d9b..74ecd19bc1 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -25,10 +25,13 @@ use clarity::vm::analysis::{CheckError, CheckErrors}; use clarity::vm::ast::errors::ParseErrors; use clarity::vm::ast::ASTRules; use clarity::vm::clarity::TransactionConnection; -use clarity::vm::costs::ExecutionCost; +use clarity::vm::costs::{ExecutionCost, LimitedCostTracker, TrackerData}; use clarity::vm::database::BurnStateDB; use clarity::vm::errors::Error as InterpreterError; -use clarity::vm::types::{QualifiedContractIdentifier, TypeSignature}; +use clarity::vm::types::{ + QualifiedContractIdentifier, StacksAddressExtensions as ClarityStacksAddressExtensions, + TypeSignature, +}; 
use libstackerdb::StackerDBChunkData; use serde::Deserialize; use stacks_common::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec}; @@ -37,8 +40,9 @@ use stacks_common::types::chainstate::{ }; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::get_epoch_time_ms; -use stacks_common::util::hash::{Hash160, MerkleTree, Sha512Trunc256Sum}; +use stacks_common::util::hash::{hex_bytes, Hash160, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; +use stacks_common::util::vrf::VRFProof; use crate::burnchains::{PrivateKey, PublicKey}; use crate::chainstate::burn::db::sortdb::{ @@ -58,8 +62,8 @@ use crate::chainstate::stacks::db::transactions::{ handle_clarity_runtime_error, ClarityRuntimeTxError, }; use crate::chainstate::stacks::db::{ - ChainstateTx, ClarityTx, MinerRewardInfo, StacksBlockHeaderTypes, StacksChainState, - StacksHeaderInfo, MINER_REWARD_MATURITY, + ChainstateTx, ClarityTx, MinerRewardInfo, StacksAccount, StacksBlockHeaderTypes, + StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, }; use crate::chainstate::stacks::events::{StacksTransactionEvent, StacksTransactionReceipt}; use crate::chainstate::stacks::miner::{ @@ -117,13 +121,15 @@ pub struct NakamotoBlockBuilder { /// Total burn this block represents total_burn: u64, /// Matured miner rewards to process, if any. - matured_miner_rewards_opt: Option, + pub(crate) matured_miner_rewards_opt: Option, /// bytes of space consumed so far pub bytes_so_far: u64, /// transactions selected txs: Vec, /// header we're filling in pub header: NakamotoBlockHeader, + /// Optional soft limit for this block's budget usage + soft_limit: Option, } pub struct MinerTenureInfo<'a> { @@ -141,7 +147,7 @@ pub struct MinerTenureInfo<'a> { pub coinbase_height: u64, pub cause: Option, pub active_reward_set: boot::RewardSet, - pub tenure_block_commit: LeaderBlockCommitOp, + pub tenure_block_commit_opt: Option, } impl NakamotoBlockBuilder { @@ -159,6 +165,7 @@ impl NakamotoBlockBuilder { bytes_so_far: 0, txs: vec![], header: NakamotoBlockHeader::genesis(), + soft_limit: None, } } @@ -176,6 +183,10 @@ impl NakamotoBlockBuilder { /// /// * `coinbase` - the coinbase tx if this is going to start a new tenure /// + /// * `bitvec_len` - the length of the bitvec of reward addresses that should be punished or not in this block. + /// + /// * `soft_limit` - an optional soft limit for the block's clarity cost for this block + /// pub fn new( parent_stacks_header: &StacksHeaderInfo, tenure_id_consensus_hash: &ConsensusHash, @@ -183,6 +194,7 @@ impl NakamotoBlockBuilder { tenure_change: Option<&StacksTransaction>, coinbase: Option<&StacksTransaction>, bitvec_len: u16, + soft_limit: Option, ) -> Result { let next_height = parent_stacks_header .anchored_header @@ -222,6 +234,7 @@ impl NakamotoBlockBuilder { .map(|b| b.timestamp) .unwrap_or(0), ), + soft_limit, }) } @@ -235,7 +248,21 @@ impl NakamotoBlockBuilder { burn_dbconn: &'a SortitionHandleConn, cause: Option, ) -> Result, Error> { - debug!("Nakamoto miner tenure begin"); + self.inner_load_tenure_info(chainstate, burn_dbconn, cause, false) + } + + /// This function should be called before `tenure_begin`. + /// It creates a MinerTenureInfo struct which owns connections to the chainstate and sortition + /// DBs, so that block-processing is guaranteed to terminate before the lives of these handles + /// expire. 
+ pub(crate) fn inner_load_tenure_info<'a>( + &self, + chainstate: &'a mut StacksChainState, + burn_dbconn: &'a SortitionHandleConn, + cause: Option, + shadow_block: bool, + ) -> Result, Error> { + debug!("Nakamoto miner tenure begin"; "shadow" => shadow_block, "tenure_change" => ?cause); let Some(tenure_election_sn) = SortitionDB::get_block_snapshot_consensus(&burn_dbconn, &self.header.consensus_hash)? @@ -247,19 +274,25 @@ impl NakamotoBlockBuilder { ); return Err(Error::NoSuchBlockError); }; - let Some(tenure_block_commit) = SortitionDB::get_block_commit( - &burn_dbconn, - &tenure_election_sn.winning_block_txid, - &tenure_election_sn.sortition_id, - )? - else { - warn!("Could not find winning block commit for burn block that elected the miner"; - "consensus_hash" => %self.header.consensus_hash, - "stacks_block_hash" => %self.header.block_hash(), - "stacks_block_id" => %self.header.block_id(), - "winning_txid" => %tenure_election_sn.winning_block_txid - ); - return Err(Error::NoSuchBlockError); + + let tenure_block_commit_opt = if shadow_block { + None + } else { + let Some(tenure_block_commit) = SortitionDB::get_block_commit( + &burn_dbconn, + &tenure_election_sn.winning_block_txid, + &tenure_election_sn.sortition_id, + )? + else { + warn!("Could not find winning block commit for burn block that elected the miner"; + "consensus_hash" => %self.header.consensus_hash, + "stacks_block_hash" => %self.header.block_hash(), + "stacks_block_id" => %self.header.block_id(), + "winning_txid" => %tenure_election_sn.winning_block_txid + ); + return Err(Error::NoSuchBlockError); + }; + Some(tenure_block_commit) }; let elected_height = tenure_election_sn.block_height; @@ -363,11 +396,11 @@ impl NakamotoBlockBuilder { cause, coinbase_height, active_reward_set, - tenure_block_commit, + tenure_block_commit_opt, }) } - /// Begin/resume mining a tenure's transactions. + /// Begin/resume mining a (normal) tenure's transactions. /// Returns an open ClarityTx for mining the block. /// NOTE: even though we don't yet know the block hash, the Clarity VM ensures that a /// transaction can't query information about the _current_ block (i.e. 
information that is not @@ -377,6 +410,12 @@ impl NakamotoBlockBuilder { burn_dbconn: &'a SortitionHandleConn, info: &'b mut MinerTenureInfo<'a>, ) -> Result, Error> { + let Some(block_commit) = info.tenure_block_commit_opt.as_ref() else { + return Err(Error::InvalidStacksBlock( + "Block-commit is required; cannot mine a shadow block".into(), + )); + }; + let SetupBlockResult { clarity_tx, matured_miner_rewards_opt, @@ -389,7 +428,6 @@ &burn_dbconn.context.pox_constants, info.parent_consensus_hash, info.parent_header_hash, - info.parent_stacks_block_height, info.parent_burn_block_height, info.burn_tip, info.burn_tip_height, @@ -397,7 +435,7 @@ info.coinbase_height, info.cause == Some(TenureChangeCause::Extended), &self.header.pox_treatment, - &info.tenure_block_commit, + block_commit, &info.active_reward_set, )?; self.matured_miner_rewards_opt = matured_miner_rewards_opt; @@ -509,6 +547,7 @@ tenure_info.tenure_change_tx(), tenure_info.coinbase_tx(), signer_bitvec_len, + None, )?; let ts_start = get_epoch_time_ms(); @@ -521,6 +560,37 @@ .block_limit() .expect("Failed to obtain block limit from miner's block connection"); + let mut soft_limit = None; + if let Some(percentage) = settings + .mempool_settings + .tenure_cost_limit_per_block_percentage + { + // Make sure we aren't actually going to multiply by 0 or attempt to increase the block limit. + assert!( + (1..=100).contains(&percentage), + "BUG: tenure_cost_limit_per_block_percentage: {percentage}%. Must be between 1 and 100" + ); + let mut remaining_limit = block_limit.clone(); + let cost_so_far = tenure_tx.cost_so_far(); + if remaining_limit.sub(&cost_so_far).is_ok() { + if remaining_limit.divide(100).is_ok() { + remaining_limit.multiply(percentage.into()).expect( + "BUG: failed to multiply by {percentage} when previously divided by 100", + ); + remaining_limit.add(&cost_so_far).expect("BUG: unexpected overflow when adding cost_so_far, which was previously checked"); + debug!( + "Setting soft limit for clarity cost to {percentage}% of remaining block limit"; + "remaining_limit" => %remaining_limit, + "cost_so_far" => %cost_so_far, + "block_limit" => %block_limit, + ); + soft_limit = Some(remaining_limit); + } + }; + } + + builder.soft_limit = soft_limit; + let initial_txs: Vec<_> = [ tenure_info.tenure_change_tx.clone(), tenure_info.coinbase_tx.clone(), @@ -607,26 +677,19 @@ impl BlockBuilder for NakamotoBlockBuilder { return TransactionResult::skipped_due_to_error(&tx, Error::BlockTooBigError); } + let non_boot_code_contract_call = match &tx.payload { + TransactionPayload::ContractCall(cc) => !cc.address.is_boot_code_addr(), + TransactionPayload::SmartContract(..) => true, + _ => false, + }; + match limit_behavior { BlockLimitFunction::CONTRACT_LIMIT_HIT => { - match &tx.payload { - TransactionPayload::ContractCall(cc) => { - // once we've hit the runtime limit once, allow boot code contract calls, but do not try to eval - // other contract calls - if !cc.address.is_boot_code_addr() { - return TransactionResult::skipped( - &tx, - "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), - ); - } - } - TransactionPayload::SmartContract(..)
=> { - return TransactionResult::skipped( - &tx, - "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), - ); - } - _ => {} + if non_boot_code_contract_call { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), + ); } } BlockLimitFunction::LIMIT_REACHED => { @@ -653,70 +716,83 @@ impl BlockBuilder for NakamotoBlockBuilder { ); return TransactionResult::problematic(&tx, Error::NetError(e)); } - let (fee, receipt) = match StacksChainState::process_transaction( - clarity_tx, tx, quiet, ast_rules, - ) { - Ok((fee, receipt)) => (fee, receipt), - Err(e) => { - let (is_problematic, e) = - TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); - if is_problematic { - return TransactionResult::problematic(&tx, e); - } else { - match e { - Error::CostOverflowError(cost_before, cost_after, total_budget) => { - clarity_tx.reset_cost(cost_before.clone()); - if total_budget.proportion_largest_dimension(&cost_before) - < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC - { - warn!( - "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", - tx.txid(), - 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, - &total_budget - ); - let mut measured_cost = cost_after; - let measured_cost = if measured_cost.sub(&cost_before).is_ok() { - Some(measured_cost) - } else { - warn!( - "Failed to compute measured cost of a too big transaction" - ); - None - }; - return TransactionResult::error( - &tx, - Error::TransactionTooBigError(measured_cost), - ); - } else { - warn!( - "Transaction {} reached block cost {}; budget was {}", - tx.txid(), - &cost_after, - &total_budget - ); - return TransactionResult::skipped_due_to_error( - &tx, - Error::BlockTooBigError, - ); - } - } - _ => return TransactionResult::error(&tx, e), - } + + let cost_before = clarity_tx.cost_so_far(); + let (fee, receipt) = + match StacksChainState::process_transaction(clarity_tx, tx, quiet, ast_rules) { + Ok(x) => x, + Err(e) => { + return parse_process_transaction_error(clarity_tx, tx, e); } + }; + let cost_after = clarity_tx.cost_so_far(); + let mut soft_limit_reached = false; + // We only attempt to apply the soft limit to non-boot code contract calls. 
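Unwinding the `ExecutionCost` method chain a few hunks above, the soft limit comes out to `cost_so_far + (block_limit - cost_so_far) * percentage / 100`: the block may consume at most the configured percentage of whatever tenure budget was still unspent when it started. A scalar sketch of the same arithmetic (real costs are multi-dimensional `ExecutionCost`s, so this is illustrative only):

```rust
/// Scalar version of the soft-limit computation from the diff.
/// Returns None if the tenure budget is already exhausted.
fn soft_limit(block_limit: u64, cost_so_far: u64, percentage: u64) -> Option<u64> {
    assert!(
        (1..=100).contains(&percentage),
        "percentage must be between 1 and 100"
    );
    let remaining = block_limit.checked_sub(cost_so_far)?;
    // Divide before multiplying, as the diff does, then add back what is
    // already spent so the result is an absolute ceiling, not a delta.
    Some(cost_so_far + remaining / 100 * percentage)
}

fn main() {
    // 25% of a fresh 1000-unit budget: ceiling at 250.
    assert_eq!(soft_limit(1000, 0, 25), Some(250));
    // 25% of the 600 units left after 400 were spent: 400 + 150 = 550.
    assert_eq!(soft_limit(1000, 400, 25), Some(550));
}
```

Note that, per `success_with_soft_limit` in the hunk that follows, the transaction that crosses the soft limit is still included in the block; the flag presumably lets the miner stop considering further non-boot contract calls rather than reject the offending transaction.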
+ if non_boot_code_contract_call { + if let Some(soft_limit) = self.soft_limit.as_ref() { + soft_limit_reached = cost_after.exceeds(soft_limit); } - }; + } + info!("Include tx"; "tx" => %tx.txid(), "payload" => tx.payload.name(), - "origin" => %tx.origin_address()); + "origin" => %tx.origin_address(), + "soft_limit_reached" => soft_limit_reached, + "cost_after" => %cost_after, + "cost_before" => %cost_before, + ); // save self.txs.push(tx.clone()); - TransactionResult::success(&tx, fee, receipt) + TransactionResult::success_with_soft_limit(&tx, fee, receipt, soft_limit_reached) }; self.bytes_so_far += tx_len; result } } + +fn parse_process_transaction_error( + clarity_tx: &mut ClarityTx, + tx: &StacksTransaction, + e: Error, +) -> TransactionResult { + let (is_problematic, e) = TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); + if is_problematic { + TransactionResult::problematic(&tx, e) + } else { + match e { + Error::CostOverflowError(cost_before, cost_after, total_budget) => { + clarity_tx.reset_cost(cost_before.clone()); + if total_budget.proportion_largest_dimension(&cost_before) + < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC + { + warn!( + "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", + tx.txid(), + 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, + &total_budget + ); + let mut measured_cost = cost_after; + let measured_cost = if measured_cost.sub(&cost_before).is_ok() { + Some(measured_cost) + } else { + warn!("Failed to compute measured cost of a too big transaction"); + None + }; + TransactionResult::error(&tx, Error::TransactionTooBigError(measured_cost)) + } else { + warn!( + "Transaction {} reached block cost {}; budget was {}", + tx.txid(), + &cost_after, + &total_budget + ); + TransactionResult::skipped_due_to_error(&tx, Error::BlockTooBigError) + } + } + _ => TransactionResult::error(&tx, e), + } + } +} diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 6266543e2a..dbaf226015 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -119,6 +119,7 @@ use crate::{chainstate, monitoring}; pub mod coordinator; pub mod keys; pub mod miner; +pub mod shadow; pub mod signer_set; pub mod staging_blocks; pub mod tenure; @@ -289,6 +290,14 @@ lazy_static! { ); "#, ]; + + pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_5: [&'static str; 2] = [ + r#" + UPDATE db_config SET version = "8"; + "#, + // Add an index for index block hash in nakamoto block headers + "CREATE INDEX IF NOT EXISTS index_block_hash ON nakamoto_block_headers(index_block_hash);", + ]; } #[cfg(test)] @@ -320,7 +329,7 @@ pub trait StacksDBIndexed { fn get(&mut self, tip: &StacksBlockId, key: &str) -> Result, DBError>; fn sqlite(&self) -> &Connection; - /// Get the ancestor block hash given a height + /// Get the ancestor block hash given a coinbase height fn get_ancestor_block_id( &mut self, coinbase_height: u64, @@ -832,6 +841,12 @@ impl NakamotoBlockHeader { )); }; + // if this is a shadow block, then its signing weight is as if every signer signed it, even + // though the signature vector is undefined. 
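The shadow branch added just below this comment delegates to `get_shadow_signer_weight`, whose body is not part of this diff; given the comment, it plausibly just totals the reward set's weights, along these lines (an assumption, not the shipped code):

```rust
/// Hypothetical reward-set entry; the real type carries keys and addresses.
struct SignerEntry {
    weight: u32,
}

/// A shadow block has no signature vector, so count it as if every
/// signer in the reward set had signed.
fn shadow_signer_weight(reward_set: &[SignerEntry]) -> u32 {
    reward_set
        .iter()
        .fold(0u32, |acc, e| acc.checked_add(e.weight).expect("weight overflow"))
}

fn main() {
    let set = [SignerEntry { weight: 3 }, SignerEntry { weight: 5 }];
    assert_eq!(shadow_signer_weight(&set), 8);
}
```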
+ if self.is_shadow_block() { + return Ok(self.get_shadow_signer_weight(reward_set)?); + } + let mut total_weight_signed: u32 = 0; // `last_index` is used to prevent out-of-order signatures let mut last_index = None; @@ -1392,6 +1407,7 @@ impl NakamotoBlock { "consensus_hash" => %self.header.consensus_hash, "stacks_block_hash" => %self.header.block_hash(), "stacks_block_id" => %self.block_id(), + "parent_block_id" => %self.header.parent_block_id, "commit_seed" => %block_commit.new_seed, "proof_seed" => %VRFSeed::from_proof(&parent_vrf_proof), "parent_vrf_proof" => %parent_vrf_proof.to_hex(), @@ -1425,10 +1441,15 @@ impl NakamotoBlock { } /// Verify the miner signature over this block. + /// If this is a shadow block, then this is always Ok(()) pub(crate) fn check_miner_signature( &self, miner_pubkey_hash160: &Hash160, ) -> Result<(), ChainstateError> { + if self.is_shadow_block() { + return Ok(()); + } + let recovered_miner_hash160 = self.recover_miner_pubkh()?; if &recovered_miner_hash160 != miner_pubkey_hash160 { warn!( @@ -1493,11 +1514,13 @@ impl NakamotoBlock { /// Verify that if this block has a coinbase, that its VRF proof is consistent with the leader /// public key's VRF key. If there is no coinbase tx, then this is a no-op. - pub(crate) fn check_coinbase_tx( + fn check_normal_coinbase_tx( &self, leader_vrf_key: &VRFPublicKey, sortition_hash: &SortitionHash, ) -> Result<(), ChainstateError> { + assert!(!self.is_shadow_block()); + // If this block has a coinbase, then verify that its VRF proof was generated by this // block's miner. We'll verify that the seed of this block-commit was generated from the // parnet tenure's VRF proof via the `validate_vrf_seed()` method, which requires that we @@ -1506,11 +1529,12 @@ impl NakamotoBlock { let (_, _, vrf_proof_opt) = coinbase_tx .try_as_coinbase() .expect("FATAL: `get_coinbase_tx()` did not return a coinbase"); + let vrf_proof = vrf_proof_opt.ok_or(ChainstateError::InvalidStacksBlock( "Nakamoto coinbase must have a VRF proof".into(), ))?; - // this block's VRF proof must have ben generated from the last sortition's sortition + // this block's VRF proof must have been generated from the last sortition's sortition // hash (which includes the last commit's VRF seed) let valid = match VRF::verify(leader_vrf_key, vrf_proof, sortition_hash.as_bytes()) { Ok(v) => v, @@ -1540,27 +1564,15 @@ impl NakamotoBlock { Ok(()) } - /// Validate this Nakamoto block header against burnchain state. - /// Used to determine whether or not we'll keep a block around (even if we don't yet have its parent). + /// Verify properties of blocks against the burnchain that are common to both normal and shadow + /// blocks. /// - /// Arguments - /// -- `tenure_burn_chain_tip` is the BlockSnapshot containing the block-commit for this block's - /// tenure. It is not always the tip of the burnchain. - /// -- `expected_burn` is the total number of burnchain tokens spent, if known. 
- /// -- `leader_key` is the miner's leader key registration transaction - /// - /// Verifies the following: /// -- (self.header.consensus_hash) that this block falls into this block-commit's tenure /// -- (self.header.burn_spent) that this block's burn total matches `burn_tip`'s total burn - /// -- (self.header.miner_signature) that this miner signed this block - /// -- if this block has a tenure change, then it's consistent with the miner's public key and - /// self.header.consensus_hash - /// -- if this block has a coinbase, then that it's VRF proof was generated by this miner - pub fn validate_against_burnchain( + fn common_validate_against_burnchain( &self, tenure_burn_chain_tip: &BlockSnapshot, expected_burn: Option, - leader_key: &LeaderKeyRegisterOp, ) -> Result<(), ChainstateError> { // this block's consensus hash must match the sortition that selected it if tenure_burn_chain_tip.consensus_hash != self.header.consensus_hash { @@ -1591,24 +1603,37 @@ } } - // miner must have signed this block - let miner_pubkey_hash160 = leader_key - .interpret_nakamoto_signing_key() - .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { - warn!( - "Leader key did not contain a hash160 of the miner signing public key"; - "leader_key" => ?leader_key, - ); - e - })?; + Ok(()) + } - self.check_miner_signature(&miner_pubkey_hash160)?; + /// Validate this Nakamoto block header against burnchain state. + /// Used to determine whether or not we'll keep a block around (even if we don't yet have its parent). + /// + /// Arguments + /// -- `tenure_burn_chain_tip` is the BlockSnapshot containing the block-commit for this block's + /// tenure. It is not always the tip of the burnchain. + /// -- `expected_burn` is the total number of burnchain tokens spent, if known. + /// -- `miner_pubkey_hash160` is the hash160 of the miner's signing public key + /// -- `vrf_public_key` is the miner's leader VRF public key + /// + /// Verifies the following: + /// -- (self.header.consensus_hash) that this block falls into this block-commit's tenure + /// -- (self.header.burn_spent) that this block's burn total matches `burn_tip`'s total burn + /// -- (self.header.miner_signature) that this miner signed this block + /// -- if this block has a tenure change, then it's consistent with the miner's public key and + /// self.header.consensus_hash + /// -- if this block has a coinbase, then its VRF proof was generated by this miner + fn validate_normal_against_burnchain( + &self, + tenure_burn_chain_tip: &BlockSnapshot, + expected_burn: Option, + miner_pubkey_hash160: &Hash160, + vrf_public_key: &VRFPublicKey, + ) -> Result<(), ChainstateError> { + self.common_validate_against_burnchain(tenure_burn_chain_tip, expected_burn)?; + self.check_miner_signature(miner_pubkey_hash160)?; self.check_tenure_tx()?; - self.check_coinbase_tx( - &leader_key.public_key, - &tenure_burn_chain_tip.sortition_hash, - )?; + self.check_normal_coinbase_tx(vrf_public_key, &tenure_burn_chain_tip.sortition_hash)?; // not verified by this method: // * chain_length (need parent block header) @@ -1860,23 +1885,36 @@ impl NakamotoChainState { let block_id = next_ready_block.block_id(); // find corresponding snapshot - let next_ready_block_snapshot = SortitionDB::get_block_snapshot_consensus( + let Some(next_ready_block_snapshot) = SortitionDB::get_block_snapshot_consensus( sort_db.conn(), &next_ready_block.header.consensus_hash, )?
- .unwrap_or_else(|| { + else { + // might not have snapshot yet, even if the block is burn-attachable, because it could + // be a shadow block + if next_ready_block.is_shadow_block() { + test_debug!( + "Stop processing Nakamoto blocks at shadow block {}", + &next_ready_block.block_id() + ); + return Ok(None); + } + + // but this isn't allowed for non-shadow blocks, which must be marked burn-attachable + // separately panic!( "CORRUPTION: staging Nakamoto block {}/{} does not correspond to a burn block", &next_ready_block.header.consensus_hash, &next_ready_block.header.block_hash() - ) - }); + ); + }; debug!("Process staging Nakamoto block"; "consensus_hash" => %next_ready_block.header.consensus_hash, "stacks_block_hash" => %next_ready_block.header.block_hash(), "stacks_block_id" => %next_ready_block.header.block_id(), - "burn_block_hash" => %next_ready_block_snapshot.burn_header_hash + "burn_block_hash" => %next_ready_block_snapshot.burn_header_hash, + "parent_block_id" => %next_ready_block.header.parent_block_id, ); let elected_height = sort_db @@ -1977,7 +2015,7 @@ impl NakamotoChainState { )); }; - let (commit_burn, sortition_burn) = if new_tenure { + let (commit_burn, sortition_burn) = if new_tenure && !next_ready_block.is_shadow_block() { // find block-commit to get commit-burn let block_commit = SortitionDB::get_block_commit( sort_db.conn(), @@ -1990,6 +2028,7 @@ impl NakamotoChainState { SortitionDB::get_block_burn_amount(sort_db.conn(), &next_ready_block_snapshot)?; (block_commit.burn_fee, sort_burn) } else { + // non-tenure-change blocks and shadow blocks both have zero additional spends (0, 0) }; @@ -2025,6 +2064,7 @@ impl NakamotoChainState { commit_burn, sortition_burn, &active_reward_set, + false, ) { Ok(next_chain_tip_info) => (Some(next_chain_tip_info), None), Err(e) => (None, Some(e)), @@ -2175,21 +2215,17 @@ impl NakamotoChainState { Ok(Some(burn_view_sn.total_burn)) } - /// Validate that a Nakamoto block attaches to the burn chain state. - /// Called before inserting the block into the staging DB. - /// Wraps `NakamotoBlock::validate_against_burnchain()`, and - /// verifies that all transactions in the block are allowed in this epoch. - pub fn validate_nakamoto_block_burnchain( + /// Verify that the given Nakamoto block attaches to the canonical burnchain fork. + /// Return Ok(snapshot) on success, where `snapshot` is the sortition corresponding to this + /// block's tenure. + /// Return Err(..) otherwise + fn validate_nakamoto_tenure_snapshot( db_handle: &SortitionHandleConn, - expected_burn: Option, block: &NakamotoBlock, - mainnet: bool, - chain_id: u32, - ) -> Result<(), ChainstateError> { + ) -> Result { // find the sortition-winning block commit for this block, as well as the block snapshot // containing the parent block-commit. This is the snapshot that corresponds to when the // miner begain its tenure; it may not be the burnchain tip. - let block_hash = block.header.block_hash(); let consensus_hash = &block.header.consensus_hash; let sort_tip = SortitionDB::get_canonical_burn_chain_tip(db_handle)?; @@ -2198,7 +2234,7 @@ impl NakamotoChainState { let Some(tenure_burn_chain_tip) = SortitionDB::get_block_snapshot_consensus(db_handle, consensus_hash)? 
else { - warn!("No sortition for {}", &consensus_hash); + warn!("No sortition for {}", consensus_hash); return Err(ChainstateError::InvalidStacksBlock( "No sortition for block's consensus hash".into(), )); @@ -2225,7 +2261,58 @@ impl NakamotoChainState { )); }; - // the block-commit itself + Ok(tenure_burn_chain_tip) + } + + /// Statically validate the block's transactions against the burnchain epoch. + /// Return Ok(()) if they pass all static checks + /// Return Err(..) if not. + fn validate_nakamoto_block_transactions_static( + mainnet: bool, + chain_id: u32, + sortdb_conn: &Connection, + block: &NakamotoBlock, + block_tenure_burn_height: u64, + ) -> Result<(), ChainstateError> { + // check the _next_ block's tenure, since when Nakamoto's miner activates, the current chain tip + // will be in epoch 2.5 (the next block will be epoch 3.0) + let cur_epoch = SortitionDB::get_stacks_epoch(sortdb_conn, block_tenure_burn_height + 1)? + .expect("FATAL: no epoch defined for current Stacks block"); + + // static checks on transactions all pass + let valid = block.validate_transactions_static(mainnet, chain_id, cur_epoch.epoch_id); + if !valid { + warn!( + "Invalid Nakamoto block, transactions failed static checks: {}/{} (epoch {})", + &block.header.consensus_hash, + &block.header.block_hash(), + cur_epoch.epoch_id + ); + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: failed static transaction checks".into(), + )); + } + + Ok(()) + } + + /// Validate that a normal Nakamoto block attaches to the burn chain state. + /// Called before inserting the block into the staging DB. + /// Wraps `NakamotoBlock::validate_against_burnchain()`, and + /// verifies that all transactions in the block are allowed in this epoch. + pub(crate) fn validate_normal_nakamoto_block_burnchain( + staging_db: NakamotoStagingBlocksConnRef, + db_handle: &SortitionHandleConn, + expected_burn: Option, + block: &NakamotoBlock, + mainnet: bool, + chain_id: u32, + ) -> Result<(), ChainstateError> { + assert!(!block.is_shadow_block()); + + let tenure_burn_chain_tip = Self::validate_nakamoto_tenure_snapshot(db_handle, block)?; + + // block-commit of this sortition let Some(block_commit) = db_handle.get_block_commit_by_txid( &tenure_burn_chain_tip.sortition_id, &tenure_burn_chain_tip.winning_block_txid, @@ -2233,13 +2320,20 @@ impl NakamotoChainState { else { warn!( "No block commit for {} in sortition for {}", - &tenure_burn_chain_tip.winning_block_txid, &consensus_hash + &tenure_burn_chain_tip.winning_block_txid, &block.header.consensus_hash ); return Err(ChainstateError::InvalidStacksBlock( "No block-commit in sortition for block's consensus hash".into(), )); }; + // if the *parent* of this block is a shadow block, then the block-commit's + // parent_vtxindex *MUST* be 0 and the parent_block_ptr *MUST* be the tenure of the + // shadow block. + // + // if the parent is not a shadow block, then this is a no-op. + Self::validate_shadow_parent_burnchain(staging_db, db_handle, block, &block_commit)?; + // key register of the winning miner let leader_key = db_handle .get_leader_key_at( @@ -2248,40 +2342,42 @@ impl NakamotoChainState { )? .expect("FATAL: have block commit but no leader key"); + // miner key hash160. 
+ let miner_pubkey_hash160 = leader_key
+ .interpret_nakamoto_signing_key()
+ .ok_or(ChainstateError::NoSuchBlockError)
+ .map_err(|e| {
+ warn!(
+ "Leader key did not contain a hash160 of the miner signing public key";
+ "leader_key" => ?leader_key,
+ );
+ e
+ })?;
+
// attaches to burn chain
- if let Err(e) =
- block.validate_against_burnchain(&tenure_burn_chain_tip, expected_burn, &leader_key)
- {
+ if let Err(e) = block.validate_normal_against_burnchain(
+ &tenure_burn_chain_tip,
+ expected_burn,
+ &miner_pubkey_hash160,
+ &leader_key.public_key,
+ ) {
warn!(
"Invalid Nakamoto block, could not validate on burnchain";
- "consensus_hash" => %consensus_hash,
- "stacks_block_hash" => %block_hash,
+ "consensus_hash" => %block.header.consensus_hash,
+ "stacks_block_hash" => %block.header.block_hash(),
"error" => ?e
);
return Err(e);
}
- // check the _next_ block's tenure, since when Nakamoto's miner activates, the current chain tip
- // will be in epoch 2.5 (the next block will be epoch 3.0)
- let cur_epoch = SortitionDB::get_stacks_epoch(
+ Self::validate_nakamoto_block_transactions_static(
+ mainnet,
+ chain_id,
db_handle.deref(),
- tenure_burn_chain_tip.block_height + 1,
- )?
- .expect("FATAL: no epoch defined for current Stacks block");
-
- // static checks on transactions all pass
- let valid = block.validate_transactions_static(mainnet, chain_id, cur_epoch.epoch_id);
- if !valid {
- warn!(
- "Invalid Nakamoto block, transactions failed static checks: {}/{} (epoch {})",
- consensus_hash, block_hash, cur_epoch.epoch_id
- );
- return Err(ChainstateError::InvalidStacksBlock(
- "Invalid Nakamoto block: failed static transaction checks".into(),
- ));
- }
-
+ block,
+ tenure_burn_chain_tip.block_height,
+ )?;
Ok(())
}
@@ -2401,9 +2497,31 @@ impl NakamotoChainState {
// checked on `::append_block()`
let expected_burn_opt = Self::get_expected_burns(db_handle, headers_conn, block)?;
+ if block.is_shadow_block() {
+ // this block is already present in the staging DB, so just perform some perfunctory
+ // validation (since shadow blocks are constructed a priori to be valid)
+ if let Err(e) = Self::validate_shadow_nakamoto_block_burnchain(
+ staging_db_tx.conn(),
+ db_handle,
+ expected_burn_opt,
+ block,
+ config.mainnet,
+ config.chain_id,
+ ) {
+ error!("Unacceptable shadow Nakamoto block";
+ "stacks_block_id" => %block.block_id(),
+ "error" => ?e
+ );
+ panic!("Unacceptable shadow Nakamoto block");
+ }
+
+ return Ok(false);
+ }
+
// this block must be consistent with its miner's leader-key and block-commit, and must
// contain only transactions that are valid in this epoch.
- if let Err(e) = Self::validate_nakamoto_block_burnchain(
+ if let Err(e) = Self::validate_normal_nakamoto_block_burnchain(
+ staging_db_tx.conn(),
db_handle,
expected_burn_opt,
block,
@@ -2515,6 +2633,24 @@ impl NakamotoChainState {
Ok(None)
}
+ /// Load the block version of a Nakamoto block
+ pub fn get_nakamoto_block_version(
+ chainstate_conn: &Connection,
+ index_block_hash: &StacksBlockId,
+ ) -> Result<Option<u8>, ChainstateError> {
+ let sql = "SELECT version FROM nakamoto_block_headers WHERE index_block_hash = ?1";
+ let args = rusqlite::params![index_block_hash];
+ let mut stmt = chainstate_conn.prepare(sql)?;
+ let result = stmt
+ .query_row(args, |row| {
+ let version: u8 = row.get(0)?;
+ Ok(version)
+ })
+ .optional()?;
+
+ Ok(result)
+ }
+
/// Load the parent block ID of a Nakamoto block
pub fn get_nakamoto_parent_block_id(
chainstate_conn: &Connection,
@@ -2786,6 +2922,12 @@ impl NakamotoChainState {
consensus_hash: &ConsensusHash,
block_commit_txid: &Txid,
) -> Result<VRFProof, ChainstateError> {
+ // is the tip a shadow block (and necessarily a Nakamoto block)?
+ if let Some(shadow_vrf_proof) = Self::get_shadow_vrf_proof(chainstate_conn, tip_block_id)? {
+ return Ok(shadow_vrf_proof);
+ }
+
+ // parent tenure is a normal tenure
let sn = SortitionDB::get_block_snapshot_consensus(sortdb_conn, consensus_hash)?.ok_or(
ChainstateError::InvalidStacksBlock("No sortition for consensus hash".into()),
)?;
@@ -2807,7 +2949,10 @@ impl NakamotoChainState {
let parent_vrf_proof =
Self::get_block_vrf_proof(chainstate_conn, tip_block_id, &parent_sn.consensus_hash)?
- .ok_or(ChainstateError::NoSuchBlockError)
+ .ok_or_else(|| {
+ warn!("No VRF proof for {}", &parent_sn.consensus_hash);
+ ChainstateError::NoSuchBlockError
+ })
.map_err(|e| {
warn!("Could not find parent VRF proof";
"tip_block_id" => %tip_block_id,
@@ -2927,6 +3072,11 @@ impl NakamotoChainState {
sortdb_conn: &Connection,
block: &NakamotoBlock,
) -> Result<(), ChainstateError> {
+ if block.is_shadow_block() {
+ // no-op
+ return Ok(());
+ }
+
// get the block-commit for this block
let sn =
SortitionDB::get_block_snapshot_consensus(sortdb_conn, &block.header.consensus_hash)?
@@ -3530,6 +3680,143 @@ impl NakamotoChainState {
))
}
+ /// Begin block-processing for a normal block and return all of the pre-processed state within a
+ /// `SetupBlockResult`.
Used by the Nakamoto miner, and called by Self::setup_normal_block_processing()
+ pub fn setup_block<'a, 'b>(
+ chainstate_tx: &'b mut ChainstateTx,
+ clarity_instance: &'a mut ClarityInstance,
+ sortition_dbconn: &'b dyn SortitionDBRef,
+ first_block_height: u64,
+ pox_constants: &PoxConstants,
+ parent_consensus_hash: ConsensusHash,
+ parent_header_hash: BlockHeaderHash,
+ parent_burn_height: u32,
+ burn_header_hash: BurnchainHeaderHash,
+ burn_header_height: u32,
+ new_tenure: bool,
+ coinbase_height: u64,
+ tenure_extend: bool,
+ block_bitvec: &BitVec<4000>,
+ tenure_block_commit: &LeaderBlockCommitOp,
+ active_reward_set: &RewardSet,
+ ) -> Result<SetupBlockResult<'a, 'b>, ChainstateError> {
+ // this block's bitvec header must match the miner's block commit punishments
+ Self::check_pox_bitvector(block_bitvec, tenure_block_commit, active_reward_set)?;
+ Self::inner_setup_block(
+ chainstate_tx,
+ clarity_instance,
+ sortition_dbconn,
+ first_block_height,
+ pox_constants,
+ parent_consensus_hash,
+ parent_header_hash,
+ parent_burn_height,
+ burn_header_hash,
+ burn_header_height,
+ new_tenure,
+ coinbase_height,
+ tenure_extend,
+ )
+ }
+
+ /// Begin block-processing for a normal block and return all of the pre-processed state within a
+ /// `SetupBlockResult`.
+ ///
+ /// Called as part of block processing
+ fn setup_normal_block_processing<'a, 'b>(
+ chainstate_tx: &'b mut ChainstateTx,
+ clarity_instance: &'a mut ClarityInstance,
+ sortition_dbconn: &'b dyn SortitionDBRef,
+ first_block_height: u64,
+ pox_constants: &PoxConstants,
+ parent_chain_tip: &StacksHeaderInfo,
+ parent_consensus_hash: ConsensusHash,
+ parent_header_hash: BlockHeaderHash,
+ parent_burn_height: u32,
+ tenure_block_snapshot: BlockSnapshot,
+ block: &NakamotoBlock,
+ new_tenure: bool,
+ coinbase_height: u64,
+ tenure_extend: bool,
+ block_bitvec: &BitVec<4000>,
+ active_reward_set: &RewardSet,
+ ) -> Result<SetupBlockResult<'a, 'b>, ChainstateError> {
+ let burn_header_hash = tenure_block_snapshot.burn_header_hash.clone();
+ let burn_header_height =
+ u32::try_from(tenure_block_snapshot.block_height).map_err(|_| {
+ ChainstateError::InvalidStacksBlock(
+ "Could not downcast burn block height to u32".into(),
+ )
+ })?;
+ let tenure_block_commit = SortitionDB::get_block_commit(
+ sortition_dbconn.sqlite_conn(),
+ &tenure_block_snapshot.winning_block_txid,
+ &tenure_block_snapshot.sortition_id,
+ )?
+ .ok_or_else(|| {
+ warn!("Invalid Nakamoto block: has no block-commit in its sortition";
+ "consensus_hash" => %block.header.consensus_hash,
+ "stacks_block_hash" => %block.header.block_hash(),
+ "stacks_block_id" => %block.header.block_id(),
+ "sortition_id" => %tenure_block_snapshot.sortition_id,
+ "block_commit_txid" => %tenure_block_snapshot.winning_block_txid
+ );
+ ChainstateError::NoSuchBlockError
+ })?;
+
+ // this block's tenure's block-commit contains the hash of the parent tenure's tenure-start
+ // block.
+ // (note that we can't check this earlier, since we need the parent tenure to have been
+ // processed)
+ if new_tenure && parent_chain_tip.is_nakamoto_block() && !block.is_first_mined() {
+ let parent_block_id = StacksBlockId::new(&parent_consensus_hash, &parent_header_hash);
+ let parent_tenure_start_header = Self::get_nakamoto_tenure_start_block_header(
+ chainstate_tx.as_tx(),
+ &parent_block_id,
+ &parent_consensus_hash,
+ )?
+ .ok_or_else(|| {
+ warn!("Invalid Nakamoto block: no start-tenure block for parent";
+ "parent_consensus_hash" => %parent_consensus_hash,
+ "consensus_hash" => %block.header.consensus_hash,
+ "stacks_block_hash" => %block.header.block_hash(),
+ "stacks_block_id" => %block.header.block_id());
+ ChainstateError::NoSuchBlockError
+ })?;
+
+ if parent_tenure_start_header.index_block_hash() != tenure_block_commit.last_tenure_id()
+ {
+ warn!("Invalid Nakamoto block: its tenure's block-commit's block ID hash does not match its parent tenure's start block";
+ "parent_consensus_hash" => %parent_consensus_hash,
+ "parent_tenure_start_block_id" => %parent_tenure_start_header.index_block_hash(),
+ "block_commit.last_tenure_id" => %tenure_block_commit.last_tenure_id(),
+ "parent_tip" => %parent_block_id,
+ );
+ test_debug!("Faulty commit: {:?}", &tenure_block_commit);
+
+ return Err(ChainstateError::NoSuchBlockError);
+ }
+ }
+ Self::setup_block(
+ chainstate_tx,
+ clarity_instance,
+ sortition_dbconn,
+ first_block_height,
+ pox_constants,
+ parent_consensus_hash,
+ parent_header_hash,
+ parent_burn_height,
+ burn_header_hash,
+ burn_header_height,
+ new_tenure,
+ coinbase_height,
+ tenure_extend,
+ block_bitvec,
+ &tenure_block_commit,
+ active_reward_set,
+ )
+ }
+
/// Begin block-processing and return all of the pre-processed state within a
/// `SetupBlockResult`.
///
@@ -3554,15 +3841,12 @@ impl NakamotoChainState {
/// * coinbase_height: the number of tenures that this block confirms (including epoch2 blocks)
/// (this is equivalent to the number of coinbases)
/// * tenure_extend: whether or not to reset the tenure's ongoing execution cost
- /// * block_bitvec: the bitvec that will control PoX reward handling for this block
- /// * tenure_block_commit: the block commit that elected this miner
- /// * active_reward_set: the reward and signer set active during `tenure_block_commit`
///
/// Returns clarity_tx, list of receipts, microblock execution cost,
/// microblock fees, microblock burns, list of microblock tx receipts,
/// miner rewards tuples, the stacks epoch id, and a boolean that
/// represents whether the epoch transition has been applied.
- pub fn setup_block<'a, 'b>(
+ fn inner_setup_block<'a, 'b>(
chainstate_tx: &'b mut ChainstateTx,
clarity_instance: &'a mut ClarityInstance,
sortition_dbconn: &'b dyn SortitionDBRef,
@@ -3570,19 +3854,13 @@ impl NakamotoChainState {
pox_constants: &PoxConstants,
parent_consensus_hash: ConsensusHash,
parent_header_hash: BlockHeaderHash,
- _parent_stacks_height: u64,
parent_burn_height: u32,
burn_header_hash: BurnchainHeaderHash,
burn_header_height: u32,
new_tenure: bool,
coinbase_height: u64,
tenure_extend: bool,
- block_bitvec: &BitVec<4000>,
- tenure_block_commit: &LeaderBlockCommitOp,
- active_reward_set: &RewardSet,
) -> Result<SetupBlockResult<'a, 'b>, ChainstateError> {
- Self::check_pox_bitvector(block_bitvec, tenure_block_commit, active_reward_set)?;
-
let parent_index_hash = StacksBlockId::new(&parent_consensus_hash, &parent_header_hash);
let parent_sortition_id = sortition_dbconn
.get_sortition_id_from_consensus_hash(&parent_consensus_hash)
@@ -3818,79 +4096,84 @@ impl NakamotoChainState {
Ok(lockup_events)
}
+ /// Verify that the PoX bitvector from the block header is consistent with the block-commit's
+ /// PoX outputs, as determined by the active reward set and whether or not the 0's in the
+ /// bitvector correspond to signers' PoX outputs.
fn check_pox_bitvector(
block_bitvec: &BitVec<4000>,
tenure_block_commit: &LeaderBlockCommitOp,
active_reward_set: &RewardSet,
) -> Result<(), ChainstateError> {
- if !tenure_block_commit.treatment.is_empty() {
- let address_to_indeces: HashMap<_, Vec<_>> = active_reward_set
- .rewarded_addresses
+ if tenure_block_commit.treatment.is_empty() {
+ return Ok(());
+ }
+
+ let address_to_indeces: HashMap<_, Vec<_>> = active_reward_set
+ .rewarded_addresses
+ .iter()
+ .enumerate()
+ .fold(HashMap::new(), |mut map, (ix, addr)| {
+ map.entry(addr).or_insert_with(Vec::new).push(ix);
+ map
+ });
+
+ // our block commit issued a punishment, check the reward set and bitvector
+ // to ensure that this was valid.
+ for treated_addr in tenure_block_commit.treatment.iter() {
+ if treated_addr.is_burn() {
+ // Don't need to assert anything about burn addresses.
+ // If they were in the reward set, "punishing" them is meaningless.
+ continue;
+ }
+ // otherwise, we need to find the indices in the rewarded_addresses
+ // corresponding to this address.
+ let empty_vec = vec![];
+ let address_indices = address_to_indeces
+ .get(treated_addr.deref())
+ .unwrap_or(&empty_vec);
+
+ // if any of them are 0, punishment is okay.
+ // if all of them are 1, punishment is not okay.
+ // if all of them are 0, *must* have punished
+ let bitvec_values: Result<Vec<bool>, ChainstateError> = address_indices
.iter()
- .enumerate()
- .fold(HashMap::new(), |mut map, (ix, addr)| {
- map.entry(addr).or_insert_with(Vec::new).push(ix);
- map
- });
-
- // our block commit issued a punishment, check the reward set and bitvector
- // to ensure that this was valid.
- for treated_addr in tenure_block_commit.treatment.iter() {
- if treated_addr.is_burn() {
- // Don't need to assert anything about burn addresses.
- // If they were in the reward set, "punishing" them is meaningless.
- continue;
- }
- // otherwise, we need to find the indices in the rewarded_addresses
- // corresponding to this address.
- let empty_vec = vec![];
- let address_indices = address_to_indeces
- .get(treated_addr.deref())
- .unwrap_or(&empty_vec);
-
- // if any of them are 0, punishment is okay.
- // if all of them are 1, punishment is not okay.
- // if all of them are 0, *must* have punished
- let bitvec_values: Result<Vec<bool>, ChainstateError> = address_indices
- .iter()
- .map(
- |ix| {
- let ix = u16::try_from(*ix)
- .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set index outside of u16".into()))?;
- let bitvec_value = block_bitvec.get(ix)
- .unwrap_or_else(|| {
- warn!("Block header's bitvec is smaller than the reward set, defaulting higher indexes to 1");
- true
- });
- Ok(bitvec_value)
- }
- )
- .collect();
- let bitvec_values = bitvec_values?;
- let all_1 = bitvec_values.iter().all(|x| *x);
- let all_0 = bitvec_values.iter().all(|x| !x);
- if all_1 {
- if treated_addr.is_punish() {
- warn!(
- "Invalid Nakamoto block: punished PoX address when bitvec contained 1s for the address";
- "reward_address" => %treated_addr.deref(),
- "bitvec_values" => ?bitvec_values,
- );
- return Err(ChainstateError::InvalidStacksBlock(
- "Bitvec does not match the block commit's PoX handling".into(),
- ));
- }
- } else if all_0 {
- if treated_addr.is_reward() {
- warn!(
- "Invalid Nakamoto block: rewarded PoX address when bitvec contained 0s for the address";
- "reward_address" => %treated_addr.deref(),
- "bitvec_values" => ?bitvec_values,
- );
- return Err(ChainstateError::InvalidStacksBlock(
- "Bitvec does not match the block commit's PoX handling".into(),
- ));
+ .map(
+ |ix| {
+ let ix = u16::try_from(*ix)
+ .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set index outside of u16".into()))?;
+ let bitvec_value = block_bitvec.get(ix)
+ .unwrap_or_else(|| {
+ warn!("Block header's bitvec is smaller than the reward set, defaulting higher indexes to 1");
+ true
+ });
+ Ok(bitvec_value)
+ }
+ )
+ .collect();
+ let bitvec_values = bitvec_values?;
+ let all_1 = bitvec_values.iter().all(|x| *x);
+ let all_0 = bitvec_values.iter().all(|x| !x);
+ if all_1 {
+ if treated_addr.is_punish() {
+ warn!(
+ "Invalid Nakamoto block: punished PoX address when bitvec contained 1s for the address";
+ "reward_address" => %treated_addr.deref(),
+ "bitvec_values" => ?bitvec_values,
+ );
+ return Err(ChainstateError::InvalidStacksBlock(
+ "Bitvec does not match the block commit's PoX handling".into(),
+ ));
+ }
+ } else if all_0 {
+ if treated_addr.is_reward() {
+ warn!(
+ "Invalid Nakamoto block: rewarded PoX address when bitvec contained 0s for the address";
+ "reward_address" => %treated_addr.deref(),
+ "bitvec_values" => ?bitvec_values,
+ );
+ return Err(ChainstateError::InvalidStacksBlock(
+ "Bitvec does not match the block commit's PoX handling".into(),
+ ));
+ }
}
}
}
@@ -3898,6 +4181,62 @@ impl NakamotoChainState {
Ok(())
}
+ pub(crate) fn make_non_advancing_receipt<'a>(
+ clarity_commit: PreCommitClarityBlock<'a>,
+ burn_dbconn: &SortitionHandleConn,
+ parent_ch: &ConsensusHash,
+ evaluated_epoch: StacksEpochId,
+ matured_rewards: Vec<MinerReward>,
+ tx_receipts: Vec<StacksTransactionReceipt>,
+ matured_rewards_info_opt: Option<MinerRewardInfo>,
+ block_execution_cost: ExecutionCost,
+ applied_epoch_transition: bool,
+ signers_updated: bool,
+ coinbase_height: u64,
+ ) -> Result<
+ (
+ StacksEpochReceipt,
+ PreCommitClarityBlock<'a>,
+ Option<RewardSetData>,
+ ),
+ ChainstateError,
+ > {
+ // get burn block stats, for the transaction receipt
+
+ let parent_sn = SortitionDB::get_block_snapshot_consensus(burn_dbconn, &parent_ch)?
+ .ok_or_else(|| { + // shouldn't happen + warn!( + "CORRUPTION: {} does not correspond to a burn block", + &parent_ch + ); + ChainstateError::InvalidStacksBlock("No parent consensus hash".into()) + })?; + let (parent_burn_block_hash, parent_burn_block_height, parent_burn_block_timestamp) = ( + parent_sn.burn_header_hash, + parent_sn.block_height, + parent_sn.burn_header_timestamp, + ); + + let epoch_receipt = StacksEpochReceipt { + header: StacksHeaderInfo::regtest_genesis(), + tx_receipts, + matured_rewards, + matured_rewards_info: matured_rewards_info_opt, + parent_microblocks_cost: ExecutionCost::zero(), + anchored_block_cost: block_execution_cost, + parent_burn_block_hash, + parent_burn_block_height: u32::try_from(parent_burn_block_height).unwrap_or(0), // shouldn't be fatal + parent_burn_block_timestamp, + evaluated_epoch, + epoch_transition: applied_epoch_transition, + signers_updated, + coinbase_height, + }; + + return Ok((epoch_receipt, clarity_commit, None)); + } + /// Append a Nakamoto Stacks block to the Stacks chain state. /// NOTE: This does _not_ set the block as processed! The caller must do this. pub(crate) fn append_block<'a>( @@ -3915,6 +4254,7 @@ impl NakamotoChainState { burnchain_commit_burn: u64, burnchain_sortition_burn: u64, active_reward_set: &RewardSet, + do_not_advance: bool, ) -> Result< ( StacksEpochReceipt, @@ -3960,8 +4300,6 @@ impl NakamotoChainState { // It must exist in the same Bitcoin fork as our `burn_dbconn`. let tenure_block_snapshot = Self::check_sortition_exists(burn_dbconn, &block.header.consensus_hash)?; - let burn_header_hash = tenure_block_snapshot.burn_header_hash.clone(); - let burn_header_height = tenure_block_snapshot.block_height; let block_hash = block.header.block_hash(); let new_tenure = block.is_wellformed_tenure_start_block().map_err(|_| { @@ -4038,54 +4376,6 @@ impl NakamotoChainState { )); } - // this block's bitvec header must match the miner's block commit punishments - let tenure_block_commit = SortitionDB::get_block_commit( - burn_dbconn.conn(), - &tenure_block_snapshot.winning_block_txid, - &tenure_block_snapshot.sortition_id, - )? - .ok_or_else(|| { - warn!("Invalid Nakamoto block: has no block-commit in its sortition"; - "consensus_hash" => %block.header.consensus_hash, - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id(), - "sortition_id" => %tenure_block_snapshot.sortition_id, - "block_commit_txid" => %tenure_block_snapshot.winning_block_txid - ); - ChainstateError::NoSuchBlockError - })?; - - // this block's tenure's block-commit contains the hash of the parent tenure's tenure-start - // block. - // (note that we can't check this earlier, since we need the parent tenure to have been - // processed) - if new_tenure && parent_chain_tip.is_nakamoto_block() && !block.is_first_mined() { - let parent_tenure_start_header = Self::get_nakamoto_tenure_start_block_header( - chainstate_tx.as_tx(), - &parent_block_id, - &parent_ch, - )? 
- .ok_or_else(|| { - warn!("Invalid Nakamoto block: no start-tenure block for parent"; - "parent_consensus_hash" => %parent_ch, - "consensus_hash" => %block.header.consensus_hash, - "stacks_block_hash" => %block.header.block_hash(), - "stacks_block_id" => %block.header.block_id()); - ChainstateError::NoSuchBlockError - })?; - - if parent_tenure_start_header.index_block_hash() != tenure_block_commit.last_tenure_id() - { - warn!("Invalid Nakamoto block: its tenure's block-commit's block ID hash does not match its parent tenure's start block"; - "parent_consensus_hash" => %parent_ch, - "parent_tenure_start_block_id" => %parent_tenure_start_header.index_block_hash(), - "block_commit.last_tenure_id" => %tenure_block_commit.last_tenure_id() - ); - - return Err(ChainstateError::NoSuchBlockError); - } - } - // verify VRF proof, if present // only need to do this once per tenure // get the resulting vrf proof bytes @@ -4109,6 +4399,7 @@ impl NakamotoChainState { burn_dbconn, block, parent_coinbase_height, + do_not_advance, )?; if new_tenure { // tenure height must have advanced @@ -4144,27 +4435,43 @@ impl NakamotoChainState { mut auto_unlock_events, signer_set_calc, burn_vote_for_aggregate_key_ops, - } = Self::setup_block( - chainstate_tx, - clarity_instance, - burn_dbconn, - first_block_height, - pox_constants, - parent_ch, - parent_block_hash, - parent_chain_tip.stacks_block_height, - parent_chain_tip.burn_header_height, - burn_header_hash, - burn_header_height.try_into().map_err(|_| { - ChainstateError::InvalidStacksBlock("Burn block height exceeded u32".into()) - })?, - new_tenure, - coinbase_height, - tenure_extend, - &block.header.pox_treatment, - &tenure_block_commit, - active_reward_set, - )?; + } = if block.is_shadow_block() { + // shadow block + Self::setup_shadow_block_processing( + chainstate_tx, + clarity_instance, + burn_dbconn, + first_block_height, + pox_constants, + parent_ch, + parent_block_hash, + parent_chain_tip.burn_header_height, + tenure_block_snapshot, + new_tenure, + coinbase_height, + tenure_extend, + )? + } else { + // normal block + Self::setup_normal_block_processing( + chainstate_tx, + clarity_instance, + burn_dbconn, + first_block_height, + pox_constants, + &parent_chain_tip, + parent_ch, + parent_block_hash, + parent_chain_tip.burn_header_height, + tenure_block_snapshot, + block, + new_tenure, + coinbase_height, + tenure_extend, + &block.header.pox_treatment, + active_reward_set, + )? 
+ };
+
let starting_cost = clarity_tx.cost_so_far();
@@ -4289,6 +4596,24 @@ impl NakamotoChainState {
.as_ref()
.map(|rewards| rewards.reward_info.clone());
+ if do_not_advance {
+ // if we're performing a block replay, and we don't want to advance any
+ // of the db state, return a fake receipt
+ return Self::make_non_advancing_receipt(
+ clarity_commit,
+ burn_dbconn,
+ &parent_ch,
+ evaluated_epoch,
+ matured_rewards,
+ tx_receipts,
+ matured_rewards_info_opt,
+ block_execution_cost,
+ applied_epoch_transition,
+ signer_set_calc.is_some(),
+ coinbase_height,
+ );
+ }
+
let new_tip = Self::advance_tip(
&mut chainstate_tx.tx,
&parent_chain_tip.anchored_header,
diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs
new file mode 100644
index 0000000000..cdc099e120
--- /dev/null
+++ b/stackslib/src/chainstate/nakamoto/shadow.rs
@@ -0,0 +1,1008 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020-2024 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+use clarity::vm::costs::ExecutionCost;
+use rusqlite::params;
+/// Shadow blocks
+///
+/// In the event of an emergency chain halt, a SIP will be written to declare that a chain halt has
+/// happened, and what transactions and blocks (if any) need to be mined at which burnchain block
+/// heights to recover the chain.
+///
+/// If this remedy is necessary, these blocks will be mined into one or more _shadow_ blocks and
+/// _shadow_ tenures.
+///
+/// Shadow blocks are blocks that are inserted directly into the staging blocks DB as part of a
+/// schema update. They are neither mined nor relayed. Instead, they are synthesized as part of an
+/// emergency node upgrade in order to ensure that the conditions which lead to the chain stall
+/// never occur.
+///
+/// For example, if a prepare phase is mined without a single block-commit hitting the Bitcoin
+/// chain, a pair of shadow block tenures will be synthesized to create a PoX anchor block and
+/// restore the chain's liveness. As another example, if insufficiently many STX are locked in PoX
+/// to get a healthy set of signers, a shadow block can be synthesized with extra `stack-stx`
+/// transactions submitted from healthy stackers in order to create a suitable PoX reward set.
+///
+/// This module contains shadow block-specific logic for the Nakamoto block header, Nakamoto block,
+/// Nakamoto chainstate, and Nakamoto miner structures.
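Editorial note on the convention this module relies on: a shadow block differs from a normal Nakamoto block only in the high bit (0x80) of the header's version byte, set by `make_shadow_tenure()` and tested by `is_shadow_block_version()` below. A minimal standalone sketch of that convention follows; it is not part of the patch, and the named constant `SHADOW_BLOCK_VERSION_BIT` is hypothetical (the patch uses the `0x80` literal inline):

    // Sketch only, under the assumption stated above: the patch itself writes
    // `version & 0x80 != 0` and `header.version |= 0x80` inline.
    const SHADOW_BLOCK_VERSION_BIT: u8 = 0x80;

    /// Mirrors `NakamotoBlockHeader::is_shadow_block_version()` defined below.
    fn is_shadow_block_version(version: u8) -> bool {
        version & SHADOW_BLOCK_VERSION_BIT != 0
    }

    /// Mirrors what `make_shadow_tenure()` does just before miner-signing the block.
    fn mark_shadow_version(version: u8) -> u8 {
        version | SHADOW_BLOCK_VERSION_BIT
    }

    fn main() {
        assert!(!is_shadow_block_version(0x00));
        assert!(is_shadow_block_version(mark_shadow_version(0x00)));
    }

Because only this one bit differs, a shadow block still hashes, serializes, and miner-signs like any other Nakamoto block; the staging DB, block processor, and miner all branch on this single predicate.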
+use rusqlite::Connection;
+use stacks_common::codec::StacksMessageCodec;
+use stacks_common::types::chainstate::{
+ BlockHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey,
+};
+use stacks_common::util::hash::Hash160;
+use stacks_common::util::vrf::VRFProof;
+
+use crate::burnchains::PoxConstants;
+use crate::chainstate::nakamoto::miner::{MinerTenureInfo, NakamotoBlockBuilder};
+use crate::chainstate::nakamoto::{
+ BlockSnapshot, ChainstateError, LeaderBlockCommitOp, NakamotoBlock, NakamotoBlockHeader,
+ NakamotoBlockObtainMethod, NakamotoChainState, NakamotoStagingBlocksConn,
+ NakamotoStagingBlocksConnRef, NakamotoStagingBlocksTx, SetupBlockResult, SortitionDB,
+ SortitionHandleConn, StacksDBIndexed,
+};
+use crate::chainstate::stacks::boot::RewardSet;
+use crate::chainstate::stacks::db::blocks::DummyEventDispatcher;
+use crate::chainstate::stacks::db::{
+ ChainstateTx, ClarityTx, StacksAccount, StacksChainState, StacksHeaderInfo,
+};
+use crate::chainstate::stacks::miner::{
+ BlockBuilder, BlockLimitFunction, TransactionError, TransactionProblematic, TransactionResult,
+ TransactionSkipped,
+};
+use crate::chainstate::stacks::{
+ CoinbasePayload, Error, StacksTransaction, StacksTransactionSigner, TenureChangeCause,
+ TenureChangePayload, TransactionAnchorMode, TransactionAuth, TransactionPayload,
+ TransactionVersion,
+};
+use crate::clarity::vm::types::StacksAddressExtensions;
+use crate::clarity_vm::clarity::ClarityInstance;
+use crate::clarity_vm::database::SortitionDBRef;
+use crate::net::Error as NetError;
+use crate::util_lib::db::{query_row, u64_to_sql, Error as DBError};
+
+impl NakamotoBlockHeader {
+ /// Is this a shadow block?
+ ///
+ /// This is a special kind of block that is directly inserted into the chainstate by means of a
+ /// consensus rule. It won't be downloaded or broadcasted, but every node will have it. They
+ /// get created as a result of a consensus-level SIP in order to restore the chain to working
+ /// order.
+ ///
+ /// Shadow blocks have the high bit of their version field set.
+ pub fn is_shadow_block(&self) -> bool {
+ Self::is_shadow_block_version(self.version)
+ }
+
+ /// Is a block version a shadow block version?
+ pub fn is_shadow_block_version(version: u8) -> bool {
+ version & 0x80 != 0
+ }
+
+ /// Get the signing weight of a shadow block
+ pub fn get_shadow_signer_weight(&self, reward_set: &RewardSet) -> Result<u32, ChainstateError> {
+ let Some(signers) = &reward_set.signers else {
+ return Err(ChainstateError::InvalidStacksBlock(
+ "No signers in the reward set".to_string(),
+ ));
+ };
+ let shadow_weight = signers
+ .iter()
+ .fold(0u32, |acc, signer| acc.saturating_add(signer.weight));
+
+ Ok(shadow_weight)
+ }
+}
+
+impl NakamotoBlock {
+ /// Is this block a shadow block?
+ /// Check the header
+ pub fn is_shadow_block(&self) -> bool {
+ self.header.is_shadow_block()
+ }
+
+ /// Verify that, if this shadow block has a coinbase, its VRF proof is consistent with the leader
+ /// public key's VRF key. If there is no coinbase tx, then this is a no-op.
+ pub(crate) fn check_shadow_coinbase_tx(&self, mainnet: bool) -> Result<(), ChainstateError> {
+ if !self.is_shadow_block() {
+ error!(
+ "FATAL: tried to validate non-shadow block in a shadow-block-specific validator"
+ );
+ panic!();
+ }
+
+ // If this shadow block has a coinbase, then verify that it has a VRF proof (which will be
+ // verified later) and that its recipient is the burn address. Shadow blocks do not award
+ // STX.
+ if let Some(coinbase_tx) = self.get_coinbase_tx() {
+ let (_, recipient_opt, vrf_proof_opt) = coinbase_tx
+ .try_as_coinbase()
+ .expect("FATAL: `get_coinbase_tx()` did not return a coinbase");
+
+ if vrf_proof_opt.is_none() {
+ return Err(ChainstateError::InvalidStacksBlock(
+ "Shadow Nakamoto coinbase must have a VRF proof".into(),
+ ));
+ }
+
+ let Some(recipient) = recipient_opt else {
+ warn!("Invalid shadow block: no recipient");
+ return Err(ChainstateError::InvalidStacksBlock(
+ "Shadow block did not pay to burn address".into(),
+ ));
+ };
+
+ // must be the standard burn address for this network
+ let burn_addr = StacksAddress::burn_address(mainnet).to_account_principal();
+ if burn_addr != *recipient {
+ warn!("Invalid shadow block: recipient does not burn");
+ return Err(ChainstateError::InvalidStacksBlock(
+ "Shadow block did not pay to burn address".into(),
+ ));
+ }
+
+ // can't check the VRF proof because the creator of the shadow block (e.g. the SIP
+ // process) isn't a miner, so it could be anything.
+ }
+ Ok(())
+ }
+
+ /// Validate this Nakamoto shadow block header against burnchain state.
+ ///
+ /// Arguments
+ /// -- `mainnet`: whether or not the chain is mainnet
+ /// -- `tenure_burn_chain_tip` is the BlockSnapshot containing the block-commit for this block's
+ /// tenure. It is not always the tip of the burnchain.
+ /// -- `expected_burn` is the total number of burnchain tokens spent, if known.
+ ///
+ /// Verifies the following:
+ /// -- (self.header.consensus_hash) that this block falls into this block-commit's tenure
+ /// -- (self.header.burn_spent) that this block's burn total matches `burn_tip`'s total burn
+ /// -- if this block has a tenure change, then it's consistent with the miner's public key and
+ /// self.header.consensus_hash
+ ///
+ /// NOTE: unlike normal blocks, we do not need to verify the VRF proof or miner signature
+ pub(crate) fn validate_shadow_against_burnchain(
+ &self,
+ mainnet: bool,
+ tenure_burn_chain_tip: &BlockSnapshot,
+ expected_burn: Option<u64>,
+ ) -> Result<(), ChainstateError> {
+ if !self.is_shadow_block() {
+ error!(
+ "FATAL: tried to validate non-shadow block in a shadow-block-specific validator"
+ );
+ panic!();
+ }
+ self.common_validate_against_burnchain(tenure_burn_chain_tip, expected_burn)?;
+ self.check_tenure_tx()?;
+ self.check_shadow_coinbase_tx(mainnet)?;
+
+ // not verified by this method:
+ // * chain_length (need parent block header)
+ // * parent_block_id (need parent block header)
+ // * block-commit seed (need parent block)
+ // * tx_merkle_root (already verified; validated on deserialization)
+ // * state_index_root (validated on process_block())
+ // * stacker signature (validated on accept_block())
+ Ok(())
+ }
+}
+
+impl NakamotoChainState {
+ /// Verify that the shadow parent of a normal block is consistent with the normal block's
+ /// tenure's block-commit.
+ ///
+ /// * the block-commit vtxindex must be 0 (i.e. burnchain coinbase)
+ /// * the block-commit block ptr must be the shadow parent tenure's sortition
+ ///
+ /// Returns Ok(()) if the parent is _not_ a shadow block
+ /// Returns Ok(()) if the parent is a shadow block, and the above criteria are met
+ /// Returns Err(ChainstateError::InvalidStacksBlock(..)) if the parent is a shadow block, and
+ /// some of the criteria above are false
+ /// Returns Err(..)
on other (DB-related) errors
+ pub(crate) fn validate_shadow_parent_burnchain(
+ staging_db: NakamotoStagingBlocksConnRef,
+ db_handle: &SortitionHandleConn,
+ block: &NakamotoBlock,
+ block_commit: &LeaderBlockCommitOp,
+ ) -> Result<(), ChainstateError> {
+ // only applies if the parent is a nakamoto block (since all shadow blocks are nakamoto
+ // blocks)
+ let Some(parent_header) =
+ staging_db.get_nakamoto_block_header(&block.header.parent_block_id)?
+ else {
+ return Ok(());
+ };
+
+ if !parent_header.is_shadow_block() {
+ return Ok(());
+ }
+
+ if block_commit.parent_vtxindex != 0 {
+ warn!("Invalid Nakamoto block: parent {} of {} is a shadow block but block-commit vtxindex is {}", &parent_header.block_id(), &block.block_id(), block_commit.parent_vtxindex);
+ return Err(ChainstateError::InvalidStacksBlock("Invalid Nakamoto block: invalid block-commit parent vtxindex for parent shadow block".into()));
+ }
+ let Some(parent_sn) =
+ SortitionDB::get_block_snapshot_consensus(db_handle, &parent_header.consensus_hash)?
+ else {
+ warn!(
+ "Invalid Nakamoto block: No sortition for parent shadow block {}",
+ &block.header.parent_block_id
+ );
+ return Err(ChainstateError::InvalidStacksBlock(
+ "Invalid Nakamoto block: parent shadow block has no sortition".into(),
+ ));
+ };
+ if u64::from(block_commit.parent_block_ptr) != parent_sn.block_height {
+ warn!("Invalid Nakamoto block: parent {} of {} is a shadow block but block-commit parent ptr is {}", &parent_header.block_id(), &block.block_id(), block_commit.parent_block_ptr);
+ return Err(ChainstateError::InvalidStacksBlock("Invalid Nakamoto block: invalid block-commit parent block ptr for parent shadow block".into()));
+ }
+
+ Ok(())
+ }
+
+ /// Validate a shadow Nakamoto block against burnchain state.
+ /// Wraps `NakamotoBlock::validate_shadow_against_burnchain()`, and
+ /// verifies that all transactions in the block are allowed in this epoch.
+ pub(crate) fn validate_shadow_nakamoto_block_burnchain(
+ staging_db: NakamotoStagingBlocksConnRef,
+ db_handle: &SortitionHandleConn,
+ expected_burn: Option<u64>,
+ block: &NakamotoBlock,
+ mainnet: bool,
+ chain_id: u32,
+ ) -> Result<(), ChainstateError> {
+ if !block.is_shadow_block() {
+ error!(
+ "FATAL: tried to validate non-shadow block in a shadow-block-specific validator"
+ );
+ panic!();
+ }
+
+ // this block must already be stored
+ if !staging_db.has_shadow_nakamoto_block_with_index_hash(&block.block_id())? {
+ warn!("Invalid shadow Nakamoto block, must already be stored";
+ "consensus_hash" => %block.header.consensus_hash,
+ "stacks_block_hash" => %block.header.block_hash(),
+ "block_id" => %block.header.block_id()
+ );
+
+ return Err(ChainstateError::InvalidStacksBlock(
+ "Shadow block must already be stored".into(),
+ ));
+ }
+
+ let tenure_burn_chain_tip = Self::validate_nakamoto_tenure_snapshot(db_handle, block)?;
+ if let Err(e) =
+ block.validate_shadow_against_burnchain(mainnet, &tenure_burn_chain_tip, expected_burn)
+ {
+ warn!(
+ "Invalid shadow Nakamoto block, could not validate on burnchain";
+ "consensus_hash" => %block.header.consensus_hash,
+ "stacks_block_hash" => %block.header.block_hash(),
+ "block_id" => %block.header.block_id(),
+ "error" => ?e
+ );
+
+ return Err(e);
+ }
+ Self::validate_nakamoto_block_transactions_static(
+ mainnet,
+ chain_id,
+ db_handle.conn(),
+ block,
+ tenure_burn_chain_tip.block_height,
+ )?;
+ Ok(())
+ }
+
+ /// Load the stored VRF proof for the given shadow block's tenure.
+ ///
+ /// Returns Ok(Some(vrf proof)) on success
+ /// Returns Ok(None) if the parent tenure isn't a shadow tenure
+ pub(crate) fn get_shadow_vrf_proof<SDBI: StacksDBIndexed>(
+ chainstate_conn: &mut SDBI,
+ tip_block_id: &StacksBlockId,
+ ) -> Result<Option<VRFProof>, ChainstateError> {
+ // is the tip a shadow block (and necessarily a Nakamoto block)?
+ let Some(parent_version) =
+ NakamotoChainState::get_nakamoto_block_version(chainstate_conn.sqlite(), tip_block_id)?
+ else {
+ return Ok(None);
+ };
+
+ if !NakamotoBlockHeader::is_shadow_block_version(parent_version) {
+ return Ok(None);
+ }
+
+ // this is a shadow block
+ let tenure_consensus_hash = NakamotoChainState::get_block_header_nakamoto_tenure_id(
+ chainstate_conn.sqlite(),
+ tip_block_id,
+ )?
+ .ok_or_else(|| {
+ warn!("No tenure consensus hash for block {}", tip_block_id);
+ ChainstateError::NoSuchBlockError
+ })?;
+
+ // the shadow tenure won't have a block-commit, but we just found its tenure ID anyway
+ debug!(
+ "Load VRF proof for shadow tenure {}",
+ &tenure_consensus_hash
+ );
+ let vrf_proof =
+ Self::get_block_vrf_proof(chainstate_conn, tip_block_id, &tenure_consensus_hash)?
+ .ok_or_else(|| {
+ warn!("No VRF proof for {}", &tenure_consensus_hash);
+ ChainstateError::NoSuchBlockError
+ })
+ .map_err(|e| {
+ warn!("Could not find shadow tenure VRF proof";
+ "tip_block_id" => %tip_block_id,
+ "shadow consensus_hash" => %tenure_consensus_hash);
+ e
+ })?;
+
+ return Ok(Some(vrf_proof));
+ }
+
+ /// Begin block-processing for a shadow block and return all of the pre-processed state within a
+ /// `SetupBlockResult`.
+ ///
+ /// Called to begin processing a shadow block
+ pub(crate) fn setup_shadow_block_processing<'a, 'b>(
+ chainstate_tx: &'b mut ChainstateTx,
+ clarity_instance: &'a mut ClarityInstance,
+ sortition_dbconn: &'b dyn SortitionDBRef,
+ first_block_height: u64,
+ pox_constants: &PoxConstants,
+ parent_consensus_hash: ConsensusHash,
+ parent_header_hash: BlockHeaderHash,
+ parent_burn_height: u32,
+ tenure_block_snapshot: BlockSnapshot,
+ new_tenure: bool,
+ coinbase_height: u64,
+ tenure_extend: bool,
+ ) -> Result<SetupBlockResult<'a, 'b>, ChainstateError> {
+ let burn_header_hash = &tenure_block_snapshot.burn_header_hash;
+ let burn_header_height =
+ u32::try_from(tenure_block_snapshot.block_height).map_err(|_| {
+ ChainstateError::InvalidStacksBlock(
+ "Failed to downcast burn block height to u32".into(),
+ )
+ })?;
+ let block_consensus_hash = &tenure_block_snapshot.consensus_hash;
+
+ let parent_block_id = StacksBlockId::new(&parent_consensus_hash, &parent_header_hash);
+
+ // tenure start header must exist and be processed
+ let _ = Self::get_nakamoto_tenure_start_block_header(
+ chainstate_tx.as_tx(),
+ &parent_block_id,
+ &parent_consensus_hash,
+ )?
+ .ok_or_else(|| {
+ warn!("Invalid shadow Nakamoto block: no start-tenure block for parent";
+ "parent_consensus_hash" => %parent_consensus_hash,
+ "consensus_hash" => %block_consensus_hash
+ );
+ ChainstateError::NoSuchBlockError
+ })?;
+
+ Self::inner_setup_block(
+ chainstate_tx,
+ clarity_instance,
+ sortition_dbconn,
+ first_block_height,
+ pox_constants,
+ parent_consensus_hash,
+ parent_header_hash,
+ parent_burn_height,
+ burn_header_hash.clone(),
+ burn_header_height,
+ new_tenure,
+ coinbase_height,
+ tenure_extend,
+ )
+ }
+}
+
+impl NakamotoBlockBuilder {
+ /// This function should be called before `tenure_begin`.
+ /// It creates a MinerTenureInfo struct which owns connections to the chainstate and sortition
+ /// DBs, so that block-processing is guaranteed to terminate before the lives of these handles
+ /// expire.
+ ///
+ /// It's used to create shadow blocks.
+ pub(crate) fn shadow_load_tenure_info<'a>(
+ &self,
+ chainstate: &'a mut StacksChainState,
+ burn_dbconn: &'a SortitionHandleConn,
+ cause: Option<TenureChangeCause>,
+ ) -> Result<MinerTenureInfo<'a>, Error> {
+ self.inner_load_tenure_info(chainstate, burn_dbconn, cause, true)
+ }
+
+ /// Begin/resume mining a shadow tenure's transactions.
+ /// Returns an open ClarityTx for mining the block.
+ /// NOTE: even though we don't yet know the block hash, the Clarity VM ensures that a
+ /// transaction can't query information about the _current_ block (i.e. information that is not
+ /// yet known).
+ pub fn shadow_tenure_begin<'a, 'b>(
+ &mut self,
+ burn_dbconn: &'a SortitionHandleConn,
+ info: &'b mut MinerTenureInfo<'a>,
+ tenure_id_consensus_hash: &ConsensusHash,
+ ) -> Result<ClarityTx<'a, 'b>, Error> {
+ let tenure_snapshot = SortitionDB::get_block_snapshot_consensus(
+ burn_dbconn.conn(),
+ tenure_id_consensus_hash,
+ )?
+ .ok_or_else(|| Error::NoSuchBlockError)?;
+
+ let SetupBlockResult {
+ clarity_tx,
+ matured_miner_rewards_opt,
+ ..
+ } = NakamotoChainState::setup_shadow_block_processing(
+ &mut info.chainstate_tx,
+ info.clarity_instance,
+ burn_dbconn,
+ burn_dbconn.context.first_block_height,
+ &burn_dbconn.context.pox_constants,
+ info.parent_consensus_hash,
+ info.parent_header_hash,
+ info.parent_burn_block_height,
+ tenure_snapshot,
+ info.cause == Some(TenureChangeCause::BlockFound),
+ info.coinbase_height,
+ info.cause == Some(TenureChangeCause::Extended),
+ )?;
+ self.matured_miner_rewards_opt = matured_miner_rewards_opt;
+ Ok(clarity_tx)
+ }
+
+ /// Get an address's account
+ pub fn get_account(
+ chainstate: &mut StacksChainState,
+ sortdb: &SortitionDB,
+ addr: &StacksAddress,
+ tip: &StacksHeaderInfo,
+ ) -> Result<StacksAccount, Error> {
+ let snapshot =
+ SortitionDB::get_block_snapshot_consensus(&sortdb.conn(), &tip.consensus_hash)?
+ .ok_or_else(|| Error::NoSuchBlockError)?;
+
+ let account = chainstate
+ .with_read_only_clarity_tx(
+ &sortdb.index_handle(&snapshot.sortition_id),
+ &tip.index_block_hash(),
+ |clarity_conn| {
+ StacksChainState::get_account(clarity_conn, &addr.to_account_principal())
+ },
+ )
+ .ok_or_else(|| Error::NoSuchBlockError)?;
+
+ Ok(account)
+ }
+
+ /// Make a shadow block from transactions
+ pub fn make_shadow_block_from_txs(
+ mut builder: NakamotoBlockBuilder,
+ chainstate_handle: &StacksChainState,
+ burn_dbconn: &SortitionHandleConn,
+ tenure_id_consensus_hash: &ConsensusHash,
+ mut txs: Vec<StacksTransaction>,
+ ) -> Result<(NakamotoBlock, u64, ExecutionCost), Error> {
+ use clarity::vm::ast::ASTRules;
+
+ debug!(
+ "Build shadow Nakamoto block from {} transactions",
+ txs.len()
+ );
+ let (mut chainstate, _) = chainstate_handle.reopen()?;
+
+ let mut tenure_cause = None;
+ for tx in txs.iter() {
+ let TransactionPayload::TenureChange(payload) = &tx.payload else {
+ continue;
+ };
+ tenure_cause = Some(payload.cause);
+ break;
+ }
+
+ let mut miner_tenure_info =
+ builder.shadow_load_tenure_info(&mut chainstate, burn_dbconn, tenure_cause)?;
+ let mut tenure_tx = builder.shadow_tenure_begin(
+ burn_dbconn,
+ &mut miner_tenure_info,
+ tenure_id_consensus_hash,
+ )?;
+ for tx in txs.drain(..)
{
+ let tx_len = tx.tx_len();
+ match builder.try_mine_tx_with_len(
+ &mut tenure_tx,
+ &tx,
+ tx_len,
+ &BlockLimitFunction::NO_LIMIT_HIT,
+ ASTRules::PrecheckSize,
+ ) {
+ TransactionResult::Success(..) => {
+ debug!("Included {}", &tx.txid());
+ }
+ TransactionResult::Skipped(TransactionSkipped { error, .. })
+ | TransactionResult::ProcessingError(TransactionError { error, .. }) => {
+ match error {
+ Error::BlockTooBigError => {
+ // done mining -- our execution budget is exceeded.
+ // Make the block from the transactions we did manage to get
+ debug!("Block budget exceeded on tx {}", &tx.txid());
+ }
+ Error::InvalidStacksTransaction(_emsg, true) => {
+ // if we have an invalid transaction that was quietly ignored, don't warn here either
+ test_debug!(
+ "Failed to apply tx {}: InvalidStacksTransaction '{:?}'",
+ &tx.txid(),
+ &_emsg
+ );
+ continue;
+ }
+ Error::ProblematicTransaction(txid) => {
+ test_debug!("Encountered problematic transaction. Aborting");
+ return Err(Error::ProblematicTransaction(txid));
+ }
+ e => {
+ warn!("Failed to apply tx {}: {:?}", &tx.txid(), &e);
+ continue;
+ }
+ }
+ }
+ TransactionResult::Problematic(TransactionProblematic { tx, .. }) => {
+ // drop from the mempool
+ debug!("Encountered problematic transaction {}", &tx.txid());
+ return Err(Error::ProblematicTransaction(tx.txid()));
+ }
+ }
+ }
+ let block = builder.mine_nakamoto_block(&mut tenure_tx);
+ let size = builder.bytes_so_far;
+ let cost = builder.tenure_finish(tenure_tx)?;
+ Ok((block, size, cost))
+ }
+
+ /// Produce a single-block shadow tenure.
+ /// Used by tooling to synthesize shadow blocks in case of an emergency.
+ /// The details and circumstances will be recorded in an accompanying SIP.
+ ///
+ /// `naka_tip_id` is the Stacks chain tip on top of which the shadow block will be built.
+ /// `tenure_id_consensus_hash` is the sortition in which the shadow block will be built.
+ /// `txs` are transactions to include, beyond a coinbase and tenure-change
+ pub fn make_shadow_tenure(
+ chainstate: &mut StacksChainState,
+ sortdb: &SortitionDB,
+ naka_tip_id: StacksBlockId,
+ tenure_id_consensus_hash: ConsensusHash,
+ mut txs: Vec<StacksTransaction>,
+ ) -> Result<NakamotoBlock, Error> {
+ let mainnet = chainstate.config().mainnet;
+ let chain_id = chainstate.config().chain_id;
+
+ let recipient = StacksAddress::burn_address(mainnet).to_account_principal();
+ let vrf_proof_bytes = vec![
+ 0x92, 0x75, 0xdf, 0x67, 0xa6, 0x8c, 0x87, 0x45, 0xc0, 0xff, 0x97, 0xb4, 0x82, 0x01,
+ 0xee, 0x6d, 0xb4, 0x47, 0xf7, 0xc9, 0x3b, 0x23, 0xae, 0x24, 0xcd, 0xc2, 0x40, 0x0f,
+ 0x52, 0xfd, 0xb0, 0x8a, 0x1a, 0x6a, 0xc7, 0xec, 0x71, 0xbf, 0x9c, 0x9c, 0x76, 0xe9,
+ 0x6e, 0xe4, 0x67, 0x5e, 0xbf, 0xf6, 0x06, 0x25, 0xaf, 0x28, 0x71, 0x85, 0x01, 0x04,
+ 0x7b, 0xfd, 0x87, 0xb8, 0x10, 0xc2, 0xd2, 0x13, 0x9b, 0x73, 0xc2, 0x3b, 0xd6, 0x9d,
+ 0xe6, 0x63, 0x60, 0x95, 0x3a, 0x64, 0x2c, 0x2a, 0x33, 0x0a,
+ ];
+
+ // safety -- we know it's a good proof
+ let vrf_proof = VRFProof::from_bytes(vrf_proof_bytes.as_slice()).unwrap();
+
+ let naka_tip_header = NakamotoChainState::get_block_header(chainstate.db(), &naka_tip_id)?
+ .ok_or_else(|| {
+ warn!("No such Nakamoto tip: {:?}", &naka_tip_id);
+ Error::NoSuchBlockError
+ })?;
+
+ let naka_tip_tenure_start_header = NakamotoChainState::get_tenure_start_block_header(
+ &mut chainstate.index_conn(),
+ &naka_tip_id,
+ &naka_tip_header.consensus_hash,
+ )?
+ .ok_or_else(|| { + Error::InvalidStacksBlock(format!( + "No tenure-start block header for tenure {}", + &naka_tip_header.consensus_hash + )) + })?; + + if naka_tip_header.anchored_header.height() + 1 + <= naka_tip_tenure_start_header.anchored_header.height() + { + return Err(Error::InvalidStacksBlock( + "Nakamoto tip is lower than its tenure-start block".into(), + )); + } + + let coinbase_payload = CoinbasePayload(naka_tip_tenure_start_header.index_block_hash().0); + + // the miner key is irrelevant + let miner_key = StacksPrivateKey::new(); + let miner_addr = StacksAddress::p2pkh(mainnet, &StacksPublicKey::from_private(&miner_key)); + let miner_tx_auth = TransactionAuth::from_p2pkh(&miner_key).ok_or_else(|| { + Error::InvalidStacksBlock( + "Unable to construct transaction auth from transient private key".into(), + ) + })?; + + let tx_version = if mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + let miner_account = Self::get_account(chainstate, sortdb, &miner_addr, &naka_tip_header)?; + + // tenure change payload (BlockFound) + let tenure_change_payload = TenureChangePayload { + tenure_consensus_hash: tenure_id_consensus_hash.clone(), + prev_tenure_consensus_hash: naka_tip_header.consensus_hash, + burn_view_consensus_hash: tenure_id_consensus_hash.clone(), + previous_tenure_end: naka_tip_id, + previous_tenure_blocks: (naka_tip_header.anchored_header.height() + 1 + - naka_tip_tenure_start_header.anchored_header.height()) + as u32, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160::from_node_public_key(&StacksPublicKey::from_private(&miner_key)), + }; + + // tenure-change tx + let tenure_change_tx = { + let mut tx_tenure_change = StacksTransaction::new( + tx_version.clone(), + miner_tx_auth.clone(), + TransactionPayload::TenureChange(tenure_change_payload), + ); + tx_tenure_change.chain_id = chain_id; + tx_tenure_change.anchor_mode = TransactionAnchorMode::OnChainOnly; + tx_tenure_change.auth.set_origin_nonce(miner_account.nonce); + + let mut tx_signer = StacksTransactionSigner::new(&tx_tenure_change); + tx_signer.sign_origin(&miner_key)?; + let tx_tenure_change_signed = tx_signer + .get_tx() + .ok_or_else(|| Error::InvalidStacksBlock("Failed to sign tenure change".into()))?; + tx_tenure_change_signed + }; + + // coinbase tx + let coinbase_tx = { + let mut tx_coinbase = StacksTransaction::new( + tx_version.clone(), + miner_tx_auth.clone(), + TransactionPayload::Coinbase(coinbase_payload, Some(recipient), Some(vrf_proof)), + ); + tx_coinbase.chain_id = chain_id; + tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; + tx_coinbase.auth.set_origin_nonce(miner_account.nonce + 1); + + let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); + tx_signer.sign_origin(&miner_key)?; + let tx_coinbase_signed = tx_signer + .get_tx() + .ok_or_else(|| Error::InvalidStacksBlock("Failed to sign coinbase".into()))?; + tx_coinbase_signed + }; + + // `burn_tip` corresponds to the burn view consensus hash of the tenure. + let burn_tip = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tenure_id_consensus_hash)? 
+ .ok_or_else(|| Error::InvalidStacksBlock("No such tenure ID".into()))?;
+
+ debug!(
+ "Build Nakamoto shadow block in tenure {} sortition {} parent_tip {}",
+ &tenure_id_consensus_hash, &burn_tip.consensus_hash, &naka_tip_id
+ );
+
+ // make a block
+ let builder = NakamotoBlockBuilder::new(
+ &naka_tip_header,
+ &tenure_id_consensus_hash,
+ burn_tip.total_burn,
+ Some(&tenure_change_tx),
+ Some(&coinbase_tx),
+ 1,
+ None,
+ )?;
+
+ let mut block_txs = vec![tenure_change_tx, coinbase_tx];
+ block_txs.append(&mut txs);
+ let (mut shadow_block, _size, _cost) = Self::make_shadow_block_from_txs(
+ builder,
+ &chainstate,
+ &sortdb.index_handle(&burn_tip.sortition_id),
+ &tenure_id_consensus_hash,
+ block_txs,
+ )?;
+
+ shadow_block.header.version |= 0x80;
+
+ // no need to sign with the signer set; just the miner is sufficient
+ // (and it can be any miner)
+ shadow_block.header.sign_miner(&miner_key)?;
+
+ Ok(shadow_block)
+ }
+}
+
+impl<'a> NakamotoStagingBlocksConnRef<'a> {
+ /// Determine if we have a particular block with the given index hash.
+ /// Returns Ok(true) if so
+ /// Returns Ok(false) if not
+ /// Returns Err(..) on DB error
+ pub fn has_shadow_nakamoto_block_with_index_hash(
+ &self,
+ index_block_hash: &StacksBlockId,
+ ) -> Result<bool, ChainstateError> {
+ let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ?1 AND obtain_method = ?2";
+ let args = params![
+ index_block_hash,
+ &NakamotoBlockObtainMethod::Shadow.to_string()
+ ];
+ let res: Option<i64> = query_row(self, qry, args)?;
+ Ok(res.is_some())
+ }
+
+ /// Is this a shadow tenure?
+ /// If any block is a shadow block in the tenure, they must all be.
+ ///
+ /// Returns true if the tenure has at least one shadow block.
+ pub fn is_shadow_tenure(
+ &self,
+ consensus_hash: &ConsensusHash,
+ ) -> Result<bool, ChainstateError> {
+ let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND obtain_method = ?2";
+ let args = rusqlite::params![
+ consensus_hash,
+ NakamotoBlockObtainMethod::Shadow.to_string()
+ ];
+ let present: Option<i64> = query_row(self, qry, args)?;
+ Ok(present.is_some())
+ }
+
+ /// Shadow blocks, unlike Stacks blocks, have a unique place in the chain history.
+ /// They are inserted post-hoc, so they and their underlying burnchain blocks don't get
+ /// invalidated via a fork. A consensus hash can identify (1) no tenures, (2) a single
+ /// shadow tenure, or (3) one or more non-shadow tenures.
+ ///
+ /// This is important when downloading a tenure that is ended by a shadow block, since it won't
+ /// be processed beforehand and its hash isn't learned from the burnchain (so we must be able
+ /// to infer that if this is a shadow tenure, none of the blocks in it have siblings).
+ pub fn get_shadow_tenure_start_block(
+ &self,
+ ch: &ConsensusHash,
+ ) -> Result<Option<NakamotoBlock>, ChainstateError> {
+ let qry = "SELECT data FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND obtain_method = ?2 ORDER BY height DESC LIMIT 1";
+ let args = params![ch, &NakamotoBlockObtainMethod::Shadow.to_string()];
+ let res: Option<Vec<u8>> = query_row(self, qry, args)?;
+ let Some(block_bytes) = res else {
+ return Ok(None);
+ };
+ let block = NakamotoBlock::consensus_deserialize(&mut block_bytes.as_slice())?;
+ if !block.is_shadow_block() {
+ error!("Staging DB corruption: expected shadow block from {}", ch);
+ return Err(DBError::Corruption.into());
+ }
+ Ok(Some(block))
+ }
+}
+
+impl<'a> NakamotoStagingBlocksTx<'a> {
+ /// Add a shadow block.
+ /// Fails if there are any non-shadow blocks present in the tenure.
+ pub fn add_shadow_block(&self, shadow_block: &NakamotoBlock) -> Result<(), ChainstateError> {
+ if !shadow_block.is_shadow_block() {
+ return Err(ChainstateError::InvalidStacksBlock(
+ "Not a shadow block".into(),
+ ));
+ }
+ let block_id = shadow_block.block_id();
+
+ // is this block stored already?
+ let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ?1";
+ let args = params![block_id];
+ let present: Option<i64> = query_row(self, qry, args)?;
+ if present.is_some() {
+ return Ok(());
+ }
+
+ // this tenure must be empty, or it must be a shadow tenure
+ let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1";
+ let args = rusqlite::params![&shadow_block.header.consensus_hash];
+ let present: Option<i64> = query_row(self, qry, args)?;
+ if present.is_some()
+ && !self
+ .conn()
+ .is_shadow_tenure(&shadow_block.header.consensus_hash)?
+ {
+ return Err(ChainstateError::InvalidStacksBlock(
+ "Shadow block cannot be inserted into non-empty non-shadow tenure".into(),
+ ));
+ }
+
+ // there must not be a block at this height in this tenure
+ let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND height = ?2";
+ let args = rusqlite::params![
+ &shadow_block.header.consensus_hash,
+ u64_to_sql(shadow_block.header.chain_length)?
+ ];
+ let present: Option<i64> = query_row(self, qry, args)?;
+ if present.is_some() {
+ return Err(ChainstateError::InvalidStacksBlock(format!(
+ "Conflicting block at height {} in tenure {}",
+ shadow_block.header.chain_length, &shadow_block.header.consensus_hash
+ )));
+ }
+
+ // the shadow block is crafted post-hoc, so we know the consensus hash exists.
+ // thus, it's always burn-attachable
+ let burn_attachable = true;
+
+ // shadow blocks cannot be replaced
+ let signing_weight = u32::MAX;
+
+ self.store_block(
+ shadow_block,
+ burn_attachable,
+ signing_weight,
+ NakamotoBlockObtainMethod::Shadow,
+ )?;
+ Ok(())
+ }
+}
+
+/// DO NOT RUN ON A RUNNING NODE (unless you're testing).
+///
+/// Insert and process a shadow block into the Stacks chainstate.
+pub fn process_shadow_block(
+ chain_state: &mut StacksChainState,
+ sort_db: &mut SortitionDB,
+ shadow_block: NakamotoBlock,
+) -> Result<(), ChainstateError> {
+ let tx = chain_state.staging_db_tx_begin()?;
+ tx.add_shadow_block(&shadow_block)?;
+ tx.commit()?;
+
+ let no_dispatch: Option<DummyEventDispatcher> = None;
+ loop {
+ let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn())?;
+
+ // process at most one block per loop pass
+ let processed_block_receipt = match NakamotoChainState::process_next_nakamoto_block(
+ chain_state,
+ sort_db,
+ &sort_tip.sortition_id,
+ no_dispatch.as_ref(),
+ ) {
+ Ok(receipt_opt) => receipt_opt,
+ Err(ChainstateError::InvalidStacksBlock(msg)) => {
+ warn!("Encountered invalid block: {}", &msg);
+ continue;
+ }
+ Err(ChainstateError::NetError(NetError::DeserializeError(msg))) => {
+ // happens if we load a zero-sized block (i.e. an invalid block)
+ warn!("Encountered invalid block (codec error): {}", &msg);
+ continue;
+ }
+ Err(e) => {
+ // something else happened
+ return Err(e.into());
+ }
+ };
+
+ if processed_block_receipt.is_none() {
+ // out of blocks
+ info!("No more blocks to process (no receipts)");
+ break;
+ };
+
+ let Some((_, processed, orphaned, _)) = chain_state
+ .nakamoto_blocks_db()
+ .get_block_processed_and_signed_weight(
+ &shadow_block.header.consensus_hash,
+ &shadow_block.header.block_hash(),
+ )?
+ else {
+ return Err(ChainstateError::InvalidStacksBlock(format!(
+ "Shadow block {} for tenure {} not stored",
+ &shadow_block.block_id(),
+ &shadow_block.header.consensus_hash
+ )));
+ };
+
+ if orphaned {
+ return Err(ChainstateError::InvalidStacksBlock(format!(
+ "Shadow block {} for tenure {} was orphaned",
+ &shadow_block.block_id(),
+ &shadow_block.header.consensus_hash
+ )));
+ }
+
+ if processed {
+ break;
+ }
+ }
+ Ok(())
+}
+
+/// DO NOT RUN ON A RUNNING NODE (unless you're testing).
+///
+/// Automatically repair a node that has been stalled due to an empty prepare phase.
+/// Works by synthesizing, inserting, and processing shadow tenures in-between the last sortition
+/// with a winner and the burnchain tip.
+///
+/// This is meant to be accessed by the tooling. Once the blocks are synthesized, they would be
+/// added into other broken nodes' chainstates by the same tooling. Ultimately, a patched node
+/// would be released with these shadow blocks added in as part of the chainstate schema.
+///
+/// Returns the synthesized shadow blocks on success.
+/// Returns error on failure.
+pub fn shadow_chainstate_repair(
+ chain_state: &mut StacksChainState,
+ sort_db: &mut SortitionDB,
+) -> Result<Vec<NakamotoBlock>, ChainstateError> {
+ let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn())?;
+
+ let header = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db)?
+ .ok_or_else(|| ChainstateError::NoSuchBlockError)?;
+
+ let header_sn =
+ SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &header.consensus_hash)?
+ .ok_or_else(|| {
+ ChainstateError::InvalidStacksBlock(
+ "Canonical stacks header does not have a sortition".into(),
+ )
+ })?;
+
+ let mut shadow_blocks = vec![];
+ for burn_height in (header_sn.block_height + 1)..sort_tip.block_height {
+ let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn())?;
+ let sort_handle = sort_db.index_handle(&sort_tip.sortition_id);
+ let sn = sort_handle
+ .get_block_snapshot_by_height(burn_height)?
+ .ok_or_else(|| ChainstateError::InvalidStacksBlock("No sortition at height".into()))?;
+
+ let header = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db)?
+ .ok_or_else(|| ChainstateError::NoSuchBlockError)?;
+
+ let chain_tip = header.index_block_hash();
+ let shadow_block = NakamotoBlockBuilder::make_shadow_tenure(
+ chain_state,
+ sort_db,
+ chain_tip.clone(),
+ sn.consensus_hash,
+ vec![],
+ )?;
+
+ shadow_blocks.push(shadow_block.clone());
+
+ process_shadow_block(chain_state, sort_db, shadow_block)?;
+ }
+
+ Ok(shadow_blocks)
+}
diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs
index 382c708850..c3e8432878 100644
--- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs
+++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs
@@ -28,7 +28,7 @@ use stacks_common::util::{get_epoch_time_secs, sleep_ms};
use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle};
use crate::chainstate::burn::BlockSnapshot;
-use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState};
+use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState};
use crate::chainstate::stacks::db::StacksChainState;
use crate::chainstate::stacks::index::marf::MarfConnection;
use crate::chainstate::stacks::{Error as ChainstateError, StacksBlock, StacksBlockHeader};
@@ -41,10 +41,16 @@ use crate::util_lib::db::{
/// The means by which a block is obtained.
#[derive(Debug, PartialEq, Clone, Copy)] pub enum NakamotoBlockObtainMethod { + /// The block was fetched by the block downloader Downloaded, + /// The block was uploaded to us via p2p Pushed, + /// This node mined the block Mined, + /// The block was uploaded to us via HTTP Uploaded, + /// This is a shadow block -- it was created by a SIP to fix a consensus bug + Shadow, } impl fmt::Display for NakamotoBlockObtainMethod { @@ -149,7 +155,12 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_2: &'static [&'static str] = &[ r#"INSERT INTO db_version (version) VALUES (2)"#, ]; -pub const NAKAMOTO_STAGING_DB_SCHEMA_LATEST: u32 = 2; +pub const NAKAMOTO_STAGING_DB_SCHEMA_3: &'static [&'static str] = &[ + r#"CREATE INDEX nakamoto_staging_blocks_by_obtain_method ON nakamoto_staging_blocks(consensus_hash,obtain_method);"#, + r#"UPDATE db_version SET version = 3"#, +]; + +pub const NAKAMOTO_STAGING_DB_SCHEMA_LATEST: u32 = 3; pub struct NakamotoStagingBlocksConn(rusqlite::Connection); @@ -211,6 +222,21 @@ impl<'a> DerefMut for NakamotoStagingBlocksTx<'a> { &mut self.0 } } +/// Open a Blob handle to a Nakamoto block +fn inner_open_nakamoto_block<'a>( + conn: &'a Connection, + rowid: i64, + readwrite: bool, +) -> Result<Blob<'a>, ChainstateError> { + let blob = conn.blob_open( + rusqlite::DatabaseName::Main, + "nakamoto_staging_blocks", + "data", + rowid, + !readwrite, + )?; + Ok(blob) +} impl NakamotoStagingBlocksConn { /// Open a Blob handle to a Nakamoto block @@ -219,18 +245,20 @@ impl NakamotoStagingBlocksConn { rowid: i64, readwrite: bool, ) -> Result<Blob<'a>, ChainstateError> { - let blob = self.blob_open( - rusqlite::DatabaseName::Main, - "nakamoto_staging_blocks", - "data", - rowid, - !readwrite, - )?; - Ok(blob) + inner_open_nakamoto_block(self.deref(), rowid, readwrite) } } impl<'a> NakamotoStagingBlocksConnRef<'a> { + /// Open a Blob handle to a Nakamoto block + pub fn open_nakamoto_block( + &'a self, + rowid: i64, + readwrite: bool, + ) -> Result<Blob<'a>, ChainstateError> { + inner_open_nakamoto_block(self.deref(), rowid, readwrite) + } + /// Determine if we have a particular block with the given index hash. /// Returns Ok(true) if so /// Returns Ok(false) if not @@ -250,7 +278,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { /// There will be at most one such block. /// /// NOTE: for Nakamoto blocks, the sighash is the same as the block hash. - pub(crate) fn get_block_processed_and_signed_weight( + pub fn get_block_processed_and_signed_weight( &self, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, @@ -332,6 +360,32 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { ))) } + /// Get a Nakamoto block header by index block hash. + /// Verifies its integrity + /// Returns Ok(Some(header)) if the block was present + /// Returns Ok(None) if there was no such block + /// Returns Err(..) on DB error, including corruption + pub fn get_nakamoto_block_header( + &self, + index_block_hash: &StacksBlockId, + ) -> Result<Option<NakamotoBlockHeader>, ChainstateError> { + let Some(rowid) = self.get_nakamoto_block_rowid(index_block_hash)?
else { + return Ok(None); + }; + + let mut fd = self.open_nakamoto_block(rowid, false)?; + let block_header = NakamotoBlockHeader::consensus_deserialize(&mut fd)?; + if &block_header.block_id() != index_block_hash { + error!( + "Staging DB corruption: expected {}, got {}", + index_block_hash, + &block_header.block_id() + ); + return Err(DBError::Corruption.into()); + } + Ok(Some(block_header)) + } + /// Get the size of a Nakamoto block, given its index block hash /// Returns Ok(Some(size)) if the block was present /// Returns Ok(None) if there was no such block @@ -443,14 +497,6 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> { }) } - /// Given a block ID, determine if it has children that have been processed and accepted - pub fn has_children(&self, index_block_hash: &StacksBlockId) -> Result<bool, ChainstateError> { - let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE parent_block_id = ?1 AND processed = 1 AND orphaned = 0 LIMIT 1"; - let args = rusqlite::params![index_block_hash]; - let children_flags: Option<i64> = query_row(self, qry, args)?; - Ok(children_flags.is_some()) - } - /// Given a consensus hash, determine if the burn block has been processed. /// Because this is stored in a denormalized way, we'll want to do this whenever we store a /// block (so we can set `burn_attachable` accordingly) @@ -534,6 +580,19 @@ impl<'a> NakamotoStagingBlocksTx<'a> { .is_burn_block_processed(&block.header.consensus_hash)? }; + let obtain_method = if block.is_shadow_block() { + // override + NakamotoBlockObtainMethod::Shadow + } else { + obtain_method + }; + + if self.conn().is_shadow_tenure(&block.header.consensus_hash)? && !block.is_shadow_block() { + return Err(ChainstateError::InvalidStacksBlock( + "Tried to insert a non-shadow block into a shadow tenure".into(), + )); + } + self.execute( "INSERT INTO nakamoto_staging_blocks ( block_hash, @@ -715,15 +774,37 @@ impl StacksChainState { /// Perform migrations pub fn migrate_nakamoto_staging_blocks(conn: &Connection) -> Result<(), ChainstateError> { - let mut version = Self::get_nakamoto_staging_blocks_db_version(conn)?; - if version < 2 { - debug!("Migrate Nakamoto staging blocks DB to schema 2"); - for cmd in NAKAMOTO_STAGING_DB_SCHEMA_2.iter() { - conn.execute(cmd, NO_PARAMS)?; + loop { + let version = Self::get_nakamoto_staging_blocks_db_version(conn)?; + if version == NAKAMOTO_STAGING_DB_SCHEMA_LATEST { + return Ok(()); + } + match version { + 1 => { + debug!("Migrate Nakamoto staging blocks DB to schema 2"); + for cmd in NAKAMOTO_STAGING_DB_SCHEMA_2.iter() { + conn.execute(cmd, NO_PARAMS)?; + } + let version = Self::get_nakamoto_staging_blocks_db_version(conn)?; + assert_eq!(version, 2, "Nakamoto staging DB migration failure"); + debug!("Migrated Nakamoto staging blocks DB to schema 2"); + } + 2 => { + debug!("Migrate Nakamoto staging blocks DB to schema 3"); + for cmd in NAKAMOTO_STAGING_DB_SCHEMA_3.iter() { + conn.execute(cmd, NO_PARAMS)?; + } + let version = Self::get_nakamoto_staging_blocks_db_version(conn)?; + assert_eq!(version, 3, "Nakamoto staging DB migration failure"); + debug!("Migrated Nakamoto staging blocks DB to schema 3"); + } + NAKAMOTO_STAGING_DB_SCHEMA_LATEST => { + break; + } + _ => { + panic!("Unusable staging DB: Unknown schema version {}", version); + } } - version = Self::get_nakamoto_staging_blocks_db_version(conn)?; - assert_eq!(version, 2, "Nakamoto staging DB migration failure"); - debug!("Migrated Nakamoto staging blocks DB to schema 2"); } Ok(()) } diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs
b/stackslib/src/chainstate/nakamoto/tenure.rs index 4b7734653c..9852733311 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -749,8 +749,18 @@ impl NakamotoChainState { warn!("Invalid tenure-change: parent snapshot comes after current tip"; "burn_view_consensus_hash" => %tenure_payload.burn_view_consensus_hash, "prev_tenure_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash); return Ok(None); } - if !prev_sn.sortition { - // parent wasn't a sortition-induced tenure change + + // is the parent a shadow block? + // Only possible if the parent is also a nakamoto block + let is_parent_shadow_block = NakamotoChainState::get_nakamoto_block_version( + headers_conn.sqlite(), + &block_header.parent_block_id, + )? + .map(|parent_version| NakamotoBlockHeader::is_shadow_block_version(parent_version)) + .unwrap_or(false); + + if !is_parent_shadow_block && !prev_sn.sortition { + // parent wasn't a shadow block (we expect a sortition), but this wasn't a sortition-induced tenure change warn!("Invalid tenure-change: no block found"; "prev_tenure_consensus_hash" => %tenure_payload.prev_tenure_consensus_hash ); @@ -758,8 +768,8 @@ impl NakamotoChainState { } } - // the tenure must correspond to sortitions - if !tenure_sn.sortition { + // if this isn't a shadow block, then the tenure must correspond to sortitions + if !block_header.is_shadow_block() && !tenure_sn.sortition { warn!("Invalid tenure-change: no block found"; "tenure_consensus_hash" => %tenure_payload.tenure_consensus_hash ); @@ -840,6 +850,7 @@ impl NakamotoChainState { handle: &mut SH, block: &NakamotoBlock, parent_coinbase_height: u64, + do_not_advance: bool, ) -> Result<u64, ChainstateError> { let Some(tenure_payload) = block.get_tenure_tx_payload() else { // no new tenure @@ -867,6 +878,9 @@ impl NakamotoChainState { )); }; + if do_not_advance { + return Ok(coinbase_height); + } Self::insert_nakamoto_tenure(headers_tx, &block.header, coinbase_height, tenure_payload)?; return Ok(coinbase_height); } @@ -1052,6 +1066,15 @@ impl NakamotoChainState { ChainstateError::NoSuchBlockError })?; + if snapshot.consensus_hash != *block_consensus_hash { + // should be unreachable, but check defensively + warn!( + "Snapshot for {} is not the same as the one for {}", + &burn_header_hash, block_consensus_hash + ); + return Err(ChainstateError::NoSuchBlockError); + } + Ok(snapshot) } } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index ea163730ec..94ef81c077 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -1663,7 +1663,9 @@ pub fn test_load_store_update_nakamoto_blocks() { /// Tests: /// * NakamotoBlockHeader::check_miner_signature /// * NakamotoBlockHeader::check_tenure_tx -/// * NakamotoBlockHeader::check_coinbase_tx +/// * NakamotoBlockHeader::is_shadow_block +/// * NakamotoBlockHeader::check_normal_coinbase_tx +/// * NakamotoBlockHeader::check_shadow_coinbase_tx #[test] fn test_nakamoto_block_static_verification() { let private_key = StacksPrivateKey::new(); @@ -1674,9 +1676,25 @@ fn test_nakamoto_block_static_verification() { let sortition_hash = SortitionHash([0x01; 32]); let vrf_proof = VRF::prove(&vrf_privkey, sortition_hash.as_bytes()); + let burn_recipient = StacksAddress::burn_address(false).to_account_principal(); + let alt_recipient = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&private_key_2)) + .to_account_principal(); + let coinbase_payload =
TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, Some(vrf_proof.clone())); + let coinbase_recipient_payload = TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(alt_recipient), + Some(vrf_proof.clone()), + ); + + let coinbase_shadow_recipient_payload = TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(burn_recipient), + Some(vrf_proof.clone()), + ); + let mut coinbase_tx = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&private_key).unwrap(), @@ -1685,6 +1703,22 @@ fn test_nakamoto_block_static_verification() { coinbase_tx.chain_id = 0x80000000; coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut coinbase_recipient_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + coinbase_recipient_payload.clone(), + ); + coinbase_recipient_tx.chain_id = 0x80000000; + coinbase_recipient_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + + let mut coinbase_shadow_recipient_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + coinbase_shadow_recipient_payload.clone(), + ); + coinbase_shadow_recipient_tx.chain_id = 0x80000000; + coinbase_shadow_recipient_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let tenure_change_payload = TenureChangePayload { tenure_consensus_hash: ConsensusHash([0x04; 20]), // same as in nakamoto header prev_tenure_consensus_hash: ConsensusHash([0x01; 20]), @@ -1754,6 +1788,29 @@ fn test_nakamoto_block_static_verification() { MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs).root() }; + let nakamoto_recipient_txs = vec![tenure_change_tx.clone(), coinbase_recipient_tx.clone()]; + let nakamoto_recipient_tx_merkle_root = { + let txid_vecs = nakamoto_recipient_txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs).root() + }; + + let nakamoto_shadow_recipient_txs = vec![ + tenure_change_tx.clone(), + coinbase_shadow_recipient_tx.clone(), + ]; + let nakamoto_shadow_recipient_tx_merkle_root = { + let txid_vecs = nakamoto_shadow_recipient_txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs).root() + }; + let nakamoto_txs_bad_ch = vec![tenure_change_tx_bad_ch.clone(), coinbase_tx.clone()]; let nakamoto_tx_merkle_root_bad_ch = { let txid_vecs = nakamoto_txs_bad_ch @@ -1837,6 +1894,48 @@ fn test_nakamoto_block_static_verification() { txs: nakamoto_txs_bad_miner_sig, }; + let mut nakamoto_recipient_header = NakamotoBlockHeader { + version: 1, + chain_length: 457, + burn_spent: 126, + consensus_hash: tenure_change_payload.tenure_consensus_hash.clone(), + parent_block_id: StacksBlockId([0x03; 32]), + tx_merkle_root: nakamoto_recipient_tx_merkle_root, + state_index_root: TrieHash([0x07; 32]), + timestamp: 8, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + nakamoto_recipient_header.sign_miner(&private_key).unwrap(); + + let nakamoto_recipient_block = NakamotoBlock { + header: nakamoto_recipient_header.clone(), + txs: nakamoto_recipient_txs, + }; + + let mut nakamoto_shadow_recipient_header = NakamotoBlockHeader { + version: 1, + chain_length: 457, + burn_spent: 126, + consensus_hash: tenure_change_payload.tenure_consensus_hash.clone(), + parent_block_id: StacksBlockId([0x03; 32]), + tx_merkle_root: nakamoto_shadow_recipient_tx_merkle_root, + state_index_root: TrieHash([0x07; 32]), + timestamp: 8,
miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + nakamoto_shadow_recipient_header + .sign_miner(&private_key) + .unwrap(); + + let nakamoto_shadow_recipient_block = NakamotoBlock { + header: nakamoto_shadow_recipient_header.clone(), + txs: nakamoto_shadow_recipient_txs, + }; + assert_eq!( nakamoto_block.header.recover_miner_pk().unwrap(), StacksPublicKey::from_private(&private_key) @@ -1863,13 +1962,78 @@ fn test_nakamoto_block_static_verification() { let vrf_alt_pubkey = VRFPublicKey::from_private(&vrf_alt_privkey); assert!(nakamoto_block - .check_coinbase_tx(&vrf_pubkey, &sortition_hash) + .check_normal_coinbase_tx(&vrf_pubkey, &sortition_hash) .is_ok()); assert!(nakamoto_block - .check_coinbase_tx(&vrf_pubkey, &SortitionHash([0x02; 32])) + .check_normal_coinbase_tx(&vrf_pubkey, &SortitionHash([0x02; 32])) .is_err()); assert!(nakamoto_block - .check_coinbase_tx(&vrf_alt_pubkey, &sortition_hash) + .check_normal_coinbase_tx(&vrf_alt_pubkey, &sortition_hash) + .is_err()); + + let mut shadow_block = nakamoto_shadow_recipient_block.clone(); + shadow_block.header.version |= 0x80; + + assert!(!nakamoto_shadow_recipient_block.is_shadow_block()); + assert!(shadow_block.is_shadow_block()); + + // miner key not checked for shadow blocks + assert!(shadow_block + .check_miner_signature(&Hash160::from_node_public_key( + &StacksPublicKey::from_private(&private_key_2) + )) + .is_ok()); + + // shadow block VRF is not checked + assert!(shadow_block.check_shadow_coinbase_tx(false).is_ok()); + + // shadow blocks need burn recipients for coinbases + let mut shadow_block_no_recipient = nakamoto_block.clone(); + shadow_block_no_recipient.header.version |= 0x80; + + assert!(shadow_block_no_recipient.is_shadow_block()); + assert!(shadow_block_no_recipient + .check_shadow_coinbase_tx(false) + .is_err()); + + let mut shadow_block_alt_recipient = nakamoto_recipient_block.clone(); + shadow_block_alt_recipient.header.version |= 0x80; + + assert!(shadow_block_alt_recipient.is_shadow_block()); + assert!(shadow_block_alt_recipient + .check_shadow_coinbase_tx(false) + .is_err()); + + // tenure tx requirements still hold for shadow blocks + let mut shadow_nakamoto_block = nakamoto_block.clone(); + let mut shadow_nakamoto_block_bad_ch = nakamoto_block_bad_ch.clone(); + let mut shadow_nakamoto_block_bad_miner_sig = nakamoto_block_bad_miner_sig.clone(); + + shadow_nakamoto_block.header.version |= 0x80; + shadow_nakamoto_block_bad_ch.header.version |= 0x80; + shadow_nakamoto_block_bad_miner_sig.header.version |= 0x80; + + shadow_nakamoto_block + .header + .sign_miner(&private_key) + .unwrap(); + shadow_nakamoto_block_bad_ch + .header + .sign_miner(&private_key) + .unwrap(); + shadow_nakamoto_block_bad_miner_sig + .header + .sign_miner(&private_key) + .unwrap(); + + assert!(shadow_nakamoto_block.is_shadow_block()); + assert!(shadow_nakamoto_block_bad_ch.is_shadow_block()); + assert!(shadow_nakamoto_block_bad_miner_sig.is_shadow_block()); + + assert!(shadow_nakamoto_block.check_tenure_tx().is_ok()); + assert!(shadow_nakamoto_block_bad_ch.check_tenure_tx().is_err()); + assert!(shadow_nakamoto_block_bad_miner_sig + .check_tenure_tx() .is_err()); } diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 6f929e0031..9a488d6a09 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -26,12 +26,13 @@ use hashbrown::HashMap; use
rand::seq::SliceRandom; use rand::{CryptoRng, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; +use rusqlite::params; use stacks_common::address::*; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use stacks_common::types::chainstate::{ BlockHeaderHash, SortitionId, StacksAddress, StacksBlockId, VRFSeed, }; -use stacks_common::util::hash::Hash160; +use stacks_common::util::hash::{hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; @@ -51,11 +52,15 @@ use crate::chainstate::coordinator::{ use crate::chainstate::nakamoto::coordinator::{ get_nakamoto_next_recipients, load_nakamoto_reward_set, }; -use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; -use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; +use crate::chainstate::nakamoto::miner::{MinerTenureInfo, NakamotoBlockBuilder}; +use crate::chainstate::nakamoto::staging_blocks::{ + NakamotoBlockObtainMethod, NakamotoStagingBlocksConnRef, +}; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, StacksDBIndexed, +}; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::db::blocks::test::store_staging_block; use crate::chainstate::stacks::db::test::*; @@ -71,7 +76,7 @@ use crate::cost_estimates::UnitEstimator; use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::test::{TestPeer, TestPeerConfig, *}; use crate::util_lib::boot::boot_code_addr; -use crate::util_lib::db::Error as db_error; +use crate::util_lib::db::{query_row, Error as db_error}; #[derive(Debug, Clone)] pub struct TestStacker { @@ -182,6 +187,7 @@ impl TestBurnchainBlock { fork_snapshot: Option<&BlockSnapshot>, parent_block_snapshot: Option<&BlockSnapshot>, vrf_seed: VRFSeed, + parent_is_shadow_block: bool, ) -> LeaderBlockCommitOp { let tenure_id_as_block_hash = BlockHeaderHash(last_tenure_id.0.clone()); self.inner_add_block_commit( @@ -194,6 +200,7 @@ impl TestBurnchainBlock { parent_block_snapshot, Some(vrf_seed), STACKS_EPOCH_3_0_MARKER, + parent_is_shadow_block, ) } } @@ -221,15 +228,26 @@ impl TestMiner { recipient: Option<PrincipalData>, vrf_proof: VRFProof, nonce: u64, + ) -> StacksTransaction { + self.make_nakamoto_coinbase_with_nonce_and_payload( + recipient, + vrf_proof, + nonce, + CoinbasePayload([(self.nonce % 256) as u8; 32]), + ) + } + + pub fn make_nakamoto_coinbase_with_nonce_and_payload( + &mut self, + recipient: Option<PrincipalData>, + vrf_proof: VRFProof, + nonce: u64, + payload: CoinbasePayload, ) -> StacksTransaction { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, self.as_transaction_auth().unwrap(), - TransactionPayload::Coinbase( - CoinbasePayload([(self.nonce % 256) as u8; 32]), - recipient, - Some(vrf_proof), - ), + TransactionPayload::Coinbase(payload, recipient, Some(vrf_proof)), ); tx_coinbase.chain_id = self.chain_id; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -273,6 +291,15 @@ impl TestMiner { } } +impl<'a> NakamotoStagingBlocksConnRef<'a> { + pub fn get_any_normal_tenure(&self) -> Result<Option<ConsensusHash>, ChainstateError> { + let qry = "SELECT consensus_hash FROM nakamoto_staging_blocks WHERE obtain_method != ?1 ORDER BY RANDOM() LIMIT 1"; + let args =
params![&NakamotoBlockObtainMethod::Shadow.to_string()]; + let res: Option<ConsensusHash> = query_row(self, qry, args)?; + Ok(res) + } +} + impl TestStacksNode { pub fn add_nakamoto_tenure_commit( sortdb: &SortitionDB, @@ -283,6 +310,7 @@ impl TestStacksNode { key_op: &LeaderKeyRegisterOp, parent_block_snapshot: Option<&BlockSnapshot>, vrf_seed: VRFSeed, + parent_is_shadow_block: bool, ) -> LeaderBlockCommitOp { let block_commit_op = { let ic = sortdb.index_conn(); @@ -296,6 +324,7 @@ impl TestStacksNode { Some(&parent_snapshot), parent_block_snapshot, vrf_seed, + parent_is_shadow_block, ) }; block_commit_op @@ -350,6 +379,7 @@ impl TestStacksNode { miner_key: &LeaderKeyRegisterOp, parent_block_snapshot_opt: Option<&BlockSnapshot>, expect_success: bool, + parent_is_shadow_block: bool, ) -> LeaderBlockCommitOp { info!( "Miner {}: Commit to Nakamoto tenure starting at {}", @@ -385,6 +415,7 @@ impl TestStacksNode { miner_key, parent_block_snapshot_opt, vrf_seed, + parent_is_shadow_block, ); test_debug!( @@ -453,71 +484,125 @@ impl TestStacksNode { ) -> (LeaderBlockCommitOp, TenureChangePayload) { // this is the tenure that the block-commit confirms. // It's not the last-ever tenure; it's the one just before it. - let (last_tenure_id, parent_block_snapshot) = - if let Some(parent_blocks) = parent_nakamoto_tenure { - // parent is an epoch 3 nakamoto block - let first_parent = parent_blocks.first().unwrap(); - let last_parent = parent_blocks.last().unwrap(); - let parent_tenure_id = StacksBlockId::new( - &first_parent.header.consensus_hash, - &first_parent.header.block_hash(), - ); - let parent_sortition = SortitionDB::get_block_snapshot_consensus( - &sortdb.conn(), - &first_parent.header.consensus_hash, + let (last_tenure_id, parent_block_snapshot, parent_is_shadow) = if let Some(parent_blocks) = + parent_nakamoto_tenure + { + // parent is an epoch 3 nakamoto block + let first_parent = parent_blocks.first().unwrap(); + let last_parent = parent_blocks.last().unwrap(); + let parent_tenure_id = StacksBlockId::new( + &first_parent.header.consensus_hash, + &first_parent.header.block_hash(), + ); + + let parent_sortition = if last_parent.is_shadow_block() { + // load up sortition that the shadow block replaces + SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &last_parent.header.consensus_hash, ) .unwrap() - .unwrap(); + .unwrap() + } else { + // parent sortition must be the last sortition _with a winner_. + // This is not guaranteed with shadow blocks, so we have to search back if + // necessary. + let mut cursor = first_parent.header.consensus_hash; + let parent_sortition = loop { + let parent_sortition = + SortitionDB::get_block_snapshot_consensus(&sortdb.conn(), &cursor) + .unwrap() + .unwrap(); - test_debug!( - "Work in {} {} for Nakamoto parent: {},{}. Last tenure ID is {}", - burn_block.block_height, - burn_block.parent_snapshot.burn_header_hash, - parent_sortition.total_burn, - last_parent.header.chain_length + 1, - &parent_tenure_id, - ); + if parent_sortition.sortition { + break parent_sortition; + } - (parent_tenure_id, parent_sortition) - } else if let Some(parent_stacks_block) = parent_stacks_block { - // building off an existing stacks block - let parent_stacks_block_snapshot = { - let ic = sortdb.index_conn(); - let parent_stacks_block_snapshot = - SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &burn_block.parent_snapshot.sortition_id, - &parent_stacks_block.block_hash(), + // last tenure was a shadow tenure?
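+ // (the sortition at `cursor` had no winner, so the tenure there must be a shadow
+ // tenure: confirm that its tenure-start block carries the shadow version bit,
+ // then hop to the parent tenure and keep searching)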
+ let Ok(Some(tenure_start_header)) = + NakamotoChainState::get_tenure_start_block_header( + &mut self.chainstate.index_conn(), + &parent_tenure_id, + &cursor, + ) + else { + panic!("No tenure-start block header for tenure {}", &cursor); + }; + + let version = tenure_start_header + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .version; + + assert!(NakamotoBlockHeader::is_shadow_block_version(version)); + cursor = self + .chainstate + .index_conn() + .get_parent_tenure_consensus_hash( + &tenure_start_header.index_block_hash(), + &cursor, ) .unwrap() .unwrap(); - parent_stacks_block_snapshot }; + parent_sortition + }; - let parent_chain_tip = StacksChainState::get_anchored_block_header_info( - self.chainstate.db(), - &parent_stacks_block_snapshot.consensus_hash, - &parent_stacks_block.header.block_hash(), - ) - .unwrap() - .unwrap(); - - let parent_tenure_id = parent_chain_tip.index_block_hash(); - - test_debug!( - "Work in {} {} for Stacks 2.x parent: {},{}. Last tenure ID is {}", + test_debug!( + "Work in {} {} for Nakamoto parent: {},{}. Last tenure ID is {}. Parent sortition is {}", burn_block.block_height, burn_block.parent_snapshot.burn_header_hash, - parent_stacks_block_snapshot.total_burn, - parent_chain_tip.anchored_header.height(), + parent_sortition.total_burn, + last_parent.header.chain_length + 1, &parent_tenure_id, + &parent_sortition.consensus_hash ); - (parent_tenure_id, parent_stacks_block_snapshot) - } else { - panic!("Neither Nakamoto nor epoch2 parent found"); + ( + parent_tenure_id, + parent_sortition, + last_parent.is_shadow_block(), + ) + } else if let Some(parent_stacks_block) = parent_stacks_block { + // building off an existing stacks block + let parent_stacks_block_snapshot = { + let ic = sortdb.index_conn(); + let parent_stacks_block_snapshot = + SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &burn_block.parent_snapshot.sortition_id, + &parent_stacks_block.block_hash(), + ) + .unwrap() + .unwrap(); + parent_stacks_block_snapshot }; + let parent_chain_tip = StacksChainState::get_anchored_block_header_info( + self.chainstate.db(), + &parent_stacks_block_snapshot.consensus_hash, + &parent_stacks_block.header.block_hash(), + ) + .unwrap() + .unwrap(); + + let parent_tenure_id = parent_chain_tip.index_block_hash(); + + test_debug!( + "Work in {} {} for Stacks 2.x parent: {},{}. Last tenure ID is {}", + burn_block.block_height, + burn_block.parent_snapshot.burn_header_hash, + parent_stacks_block_snapshot.total_burn, + parent_chain_tip.anchored_header.height(), + &parent_tenure_id, + ); + + (parent_tenure_id, parent_stacks_block_snapshot, false) + } else { + panic!("Neither Nakamoto nor epoch2 parent found"); + }; + // the tenure-change contains a pointer to the end of the last tenure, which is currently // the canonical tip unless overridden let (previous_tenure_end, previous_tenure_consensus_hash, previous_tenure_blocks) = @@ -551,7 +636,9 @@ impl TestStacksNode { ); (hdr.index_block_hash(), hdr.consensus_hash, tenure_len) } else { - // building atop epoch2 + // building atop epoch2 (so the parent block can't be a shadow block, meaning + // that parent_block_snapshot is _guaranteed_ to be the snapshot that chose + // last_tenure_id). 
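+ // (an epoch2 tenure always contains exactly one anchored block, hence the
+ // constant tenure length of 1 logged below)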
debug!( "Tenure length of epoch2 tenure {} is {}; tipped at {}", &parent_block_snapshot.consensus_hash, 1, &last_tenure_id @@ -585,6 +672,7 @@ impl TestStacksNode { miner_key, Some(&parent_block_snapshot), tenure_change_cause == TenureChangeCause::BlockFound, + parent_is_shadow, ); (block_commit_op, tenure_change_payload) @@ -599,6 +687,10 @@ impl TestStacksNode { /// The first block will contain a coinbase, if coinbase is Some(..) /// Process the blocks via the chains coordinator as we produce them. /// + /// If malleablize is true, then malleablized blocks will be created by varying the number of + /// signatures. Each malleablized block will be processed and stored if its signatures clear + /// the signing threshold. + /// /// Returns a list of /// * the block /// * its size @@ -626,7 +718,7 @@ impl TestStacksNode { mut after_block: G, malleablize: bool, mined_canonical: bool, - ) -> Vec<(NakamotoBlock, u64, ExecutionCost, Vec<NakamotoBlock>)> + ) -> Result<Vec<(NakamotoBlock, u64, ExecutionCost, Vec<NakamotoBlock>)>, ChainstateError> where S: FnMut(&mut NakamotoBlockBuilder), F: FnMut( @@ -665,7 +757,7 @@ impl TestStacksNode { let parent_tip_opt = if let Some(parent_id) = parent_id_opt { if let Some(nakamoto_parent) = - NakamotoChainState::get_block_header(chainstate.db(), &parent_id).unwrap() + NakamotoChainState::get_block_header(chainstate.db(), &parent_id)? { debug!( "Use parent tip identified by produced TenureChange ({})", @@ -674,8 +766,7 @@ impl TestStacksNode { Some(nakamoto_parent) } else { warn!("Produced Tenure change transaction does not point to a real block"); - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) - .unwrap() + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)? } } else if let Some(tenure_change) = tenure_change.as_ref() { // make sure parent tip is consistent with a tenure change @@ -683,9 +774,7 @@ impl TestStacksNode { if let Some(nakamoto_parent) = NakamotoChainState::get_block_header( chainstate.db(), &payload.previous_tenure_end, - ) - .unwrap() - { + )? { debug!( "Use parent tip identified by given TenureChange ({})", &payload.previous_tenure_end @@ -693,17 +782,16 @@ impl TestStacksNode { Some(nakamoto_parent) } else { debug!("Use parent tip identified by canonical tip pointer (no parent block {})", &payload.previous_tenure_end); - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) - .unwrap() + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)? } } else { panic!("Tenure change transaction does not have a TenureChange payload"); } } else { - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb).unwrap() + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)? }; - let burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; debug!( "Build Nakamoto block in tenure {} sortition {} parent_tip {:?}", @@ -729,8 +817,8 @@ impl TestStacksNode { None }, 1, - ) - .unwrap() + None, + )? } else { NakamotoBlockBuilder::new_first_block( &tenure_change.clone().unwrap(), @@ -747,22 +835,21 @@ impl TestStacksNode { chainstate, &sortdb.index_handle_at_tip(), txs, - ) - .unwrap(); + )?; let try_to_process = after_block(&mut nakamoto_block); miner.sign_nakamoto_block(&mut nakamoto_block); let tenure_sn = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), tenure_id_consensus_hash) - .unwrap() - .unwrap(); + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), tenure_id_consensus_hash)?
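+ // the tenure snapshot must exist, since the tenure's consensus hash came from a
+ // processed sortition; surface a clean error instead of unwrapping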
+ .ok_or_else(|| ChainstateError::NoSuchBlockError)?; + let cycle = sortdb .pox_constants .block_height_to_reward_cycle(sortdb.first_block_height, tenure_sn.block_height) .unwrap(); // Get the reward set - let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; let reward_set = load_nakamoto_reward_set( miner .burnchain @@ -803,9 +890,11 @@ impl TestStacksNode { &block_id, &nakamoto_block.txs ); - let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; let mut sort_handle = sortdb.index_handle(&sort_tip); - let stacks_tip = sort_handle.get_nakamoto_tip_block_id().unwrap().unwrap(); + let stacks_tip = sort_handle + .get_nakamoto_tip_block_id()? + .ok_or_else(|| ChainstateError::NoSuchBlockError)?; let mut block_to_store = nakamoto_block.clone(); let mut processed_blocks = vec![]; @@ -864,9 +953,8 @@ impl TestStacksNode { let stacks_chain_tip = NakamotoChainState::get_canonical_block_header( chainstate.db(), &sortdb, - ) - .unwrap() - .unwrap(); + )? + .ok_or_else(|| ChainstateError::NoSuchBlockError)?; let nakamoto_chain_tip = stacks_chain_tip .anchored_header .as_stacks_nakamoto() @@ -917,11 +1005,11 @@ impl TestStacksNode { all_malleablized_blocks.push(malleablized_blocks); block_count += 1; } - blocks + Ok(blocks .into_iter() .zip(all_malleablized_blocks.into_iter()) .map(|((blk, sz, cost), mals)| (blk, sz, cost, mals)) - .collect() + .collect()) } pub fn make_nakamoto_block_from_txs( @@ -1106,33 +1194,74 @@ impl<'a> TestPeer<'a> { // find the VRF leader key register tx to use. // it's the one pointed to by the parent tenure - let parent_consensus_hash_opt = if let Some(parent_tenure) = parent_tenure_opt.as_ref() { - let tenure_start_block = parent_tenure.first().unwrap(); - Some(tenure_start_block.header.consensus_hash) - } else if let Some(parent_block) = parent_block_opt.as_ref() { - let parent_header_info = - StacksChainState::get_stacks_block_header_info_by_index_block_hash( - stacks_node.chainstate.db(), - &last_tenure_id, + let parent_consensus_hash_and_tenure_start_id_opt = + if let Some(parent_tenure) = parent_tenure_opt.as_ref() { + let tenure_start_block = parent_tenure.first().unwrap(); + Some(( + tenure_start_block.header.consensus_hash, + tenure_start_block.block_id(), + )) + } else if let Some(parent_block) = parent_block_opt.as_ref() { + let parent_header_info = + StacksChainState::get_stacks_block_header_info_by_index_block_hash( + stacks_node.chainstate.db(), + &last_tenure_id, + ) + .unwrap() + .unwrap(); + Some(( + parent_header_info.consensus_hash, + parent_header_info.index_block_hash(), + )) + } else { + None + }; + + let last_key = if let Some((ch, parent_tenure_start_block_id)) = + parent_consensus_hash_and_tenure_start_id_opt.clone() + { + // it's possible that the parent was a shadow block. 
+ // if so, find the highest non-shadow ancestor's block-commit, so we can
+ // use it to look up the tenure's leader key below. + let mut cursor = ch; + let (tenure_sn, tenure_block_commit) = loop { + let tenure_sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &cursor) + .unwrap() + .unwrap(); + + let Some(tenure_block_commit) = get_block_commit_by_txid( + sortdb.conn(), + &tenure_sn.sortition_id, + &tenure_sn.winning_block_txid, ) - .unwrap() - .unwrap(); - Some(parent_header_info.consensus_hash) - } else { - None - }; + .unwrap() else { + // parent must be a shadow block + let header = NakamotoChainState::get_block_header_nakamoto( + stacks_node.chainstate.db(), + &parent_tenure_start_block_id, + ) + .unwrap() + .unwrap() + .anchored_header + .as_stacks_nakamoto() + .cloned() + .unwrap(); + + if !header.is_shadow_block() { + panic!("Parent tenure start block ID {} has no block-commit and is not a shadow block", &parent_tenure_start_block_id); + } + + cursor = stacks_node + .chainstate + .index_conn() + .get_parent_tenure_consensus_hash(&parent_tenure_start_block_id, &cursor) + .unwrap() + .unwrap(); + + continue; + }; + break (tenure_sn, tenure_block_commit); + }; - let last_key = if let Some(ch) = parent_consensus_hash_opt.clone() { - let tenure_sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &ch) - .unwrap() - .unwrap(); - let tenure_block_commit = get_block_commit_by_txid( - sortdb.conn(), - &tenure_sn.sortition_id, - &tenure_sn.winning_block_txid, - ) - .unwrap() - .unwrap(); let tenure_leader_key = SortitionDB::get_leader_key_at( &sortdb.index_conn(), tenure_block_commit.key_block_ptr.into(), @@ -1317,6 +1446,7 @@ impl<'a> TestPeer<'a> { block_builder, |_| true, ) + .unwrap() } /// Produce and process a Nakamoto tenure, after processing the block-commit from @@ -1332,7 +1462,7 @@ impl<'a> TestPeer<'a> { miner_setup: S, block_builder: F, after_block: G, - ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> + ) -> Result<Vec<(NakamotoBlock, u64, ExecutionCost)>, ChainstateError> where S: FnMut(&mut NakamotoBlockBuilder), F: FnMut( @@ -1344,59 +1474,55 @@ impl<'a> TestPeer<'a> { G: FnMut(&mut NakamotoBlock) -> bool, { let cycle = self.get_reward_cycle(); - let mut stacks_node = self.stacks_node.take().unwrap(); - let mut sortdb = self.sortdb.take().unwrap(); - - // Ensure the signers are setup for the current cycle - signers.generate_aggregate_key(cycle); - - let blocks = TestStacksNode::make_nakamoto_tenure_blocks( - &mut stacks_node.chainstate, - &mut sortdb, - &mut self.miner, - signers, - &tenure_change - .try_as_tenure_change() - .unwrap() - .tenure_consensus_hash - .clone(), - Some(tenure_change), - Some(coinbase), - &mut self.coord, - miner_setup, - block_builder, - after_block, - self.mine_malleablized_blocks, - self.nakamoto_parent_tenure_opt.is_none(), - ); + self.with_dbs(|peer, sortdb, stacks_node, mempool| { + // Ensure the signers are setup for the current cycle + signers.generate_aggregate_key(cycle); - let just_blocks = blocks - .clone() - .into_iter() - .map(|(block, _, _, _)| block) - .collect(); - - stacks_node.add_nakamoto_tenure_blocks(just_blocks); + let blocks = TestStacksNode::make_nakamoto_tenure_blocks( + &mut stacks_node.chainstate, + sortdb, + &mut peer.miner, + signers, + &tenure_change + .try_as_tenure_change() + .unwrap() + .tenure_consensus_hash + .clone(), + Some(tenure_change), + Some(coinbase), + &mut peer.coord, + miner_setup, + block_builder, + after_block, + peer.mine_malleablized_blocks, + peer.nakamoto_parent_tenure_opt.is_none(), + )?; + + let just_blocks = blocks + .clone() + .into_iter() + .map(|(block, _, _, _)|
block) + .collect(); - let mut malleablized_blocks: Vec<NakamotoBlock> = blocks - .clone() - .into_iter() - .map(|(_, _, _, malleablized)| malleablized) - .flatten() - .collect(); + stacks_node.add_nakamoto_tenure_blocks(just_blocks); - self.malleablized_blocks.append(&mut malleablized_blocks); + let mut malleablized_blocks: Vec<NakamotoBlock> = blocks + .clone() + .into_iter() + .map(|(_, _, _, malleablized)| malleablized) + .flatten() + .collect(); - let block_data = blocks - .clone() - .into_iter() - .map(|(blk, sz, cost, _)| (blk, sz, cost)) - .collect(); + peer.malleablized_blocks.append(&mut malleablized_blocks); - self.stacks_node = Some(stacks_node); - self.sortdb = Some(sortdb); + let block_data = blocks + .clone() + .into_iter() + .map(|(blk, sz, cost, _)| (blk, sz, cost)) + .collect(); - block_data + Ok(block_data) + }) } /// Produce and process a Nakamoto tenure extension. @@ -1460,7 +1586,8 @@ impl<'a> TestPeer<'a> { |_| true, self.mine_malleablized_blocks, self.nakamoto_parent_tenure_opt.is_none(), - ); + ) + .unwrap(); let just_blocks = blocks .clone() @@ -1832,7 +1959,7 @@ impl<'a> TestPeer<'a> { ); let parent_vrf_proof = NakamotoChainState::get_parent_vrf_proof( &mut chainstate.index_conn(), - &block.block_id(), + &block.header.parent_block_id, &sortdb.conn(), &block.header.consensus_hash, &tenure_block_commit.txid, @@ -2196,5 +2323,287 @@ impl<'a> TestPeer<'a> { ) .unwrap()); } + + // validate_shadow_parent_burnchain + // should always succeed + NakamotoChainState::validate_shadow_parent_burnchain( + chainstate.nakamoto_blocks_db(), + &sortdb.index_handle_at_tip(), + block, + &tenure_block_commit, + ) + .unwrap(); + + if parent_block_header + .anchored_header + .as_stacks_nakamoto() + .map(|hdr| hdr.is_shadow_block()) + .unwrap_or(false) + { + // test error cases + let mut bad_tenure_block_commit_vtxindex = tenure_block_commit.clone(); + bad_tenure_block_commit_vtxindex.parent_vtxindex = 1; + + let mut bad_tenure_block_commit_block_ptr = tenure_block_commit.clone(); + bad_tenure_block_commit_block_ptr.parent_block_ptr += 1; + + let mut bad_block_no_parent = block.clone(); + bad_block_no_parent.header.parent_block_id = StacksBlockId([0x11; 32]); + + // not a problem if there's no (nakamoto) parent, since the parent can be a + // (non-shadow) epoch2 block not present in the staging chainstate + NakamotoChainState::validate_shadow_parent_burnchain( + chainstate.nakamoto_blocks_db(), + &sortdb.index_handle_at_tip(), + &bad_block_no_parent, + &tenure_block_commit, + ) + .unwrap(); + + // should fail because vtxindex must be 0 + let ChainstateError::InvalidStacksBlock(_) = + NakamotoChainState::validate_shadow_parent_burnchain( + chainstate.nakamoto_blocks_db(), + &sortdb.index_handle_at_tip(), + block, + &bad_tenure_block_commit_vtxindex, + ) + .unwrap_err() + else { + panic!("validate_shadow_parent_burnchain did not fail as expected"); + }; + + // should fail because it doesn't point to shadow tenure + let ChainstateError::InvalidStacksBlock(_) = + NakamotoChainState::validate_shadow_parent_burnchain( + chainstate.nakamoto_blocks_db(), + &sortdb.index_handle_at_tip(), + block, + &bad_tenure_block_commit_block_ptr, + ) + .unwrap_err() + else { + panic!("validate_shadow_parent_burnchain did not fail as expected"); + }; + } + + if block.is_shadow_block() { + // block is stored + assert!(chainstate + .nakamoto_blocks_db() + .has_shadow_nakamoto_block_with_index_hash(&block.block_id()) + .unwrap()); + + // block is in a shadow tenure + assert!(chainstate + .nakamoto_blocks_db()
.is_shadow_tenure(&block.header.consensus_hash) + .unwrap()); + + // shadow tenure has a start block + assert!(chainstate + .nakamoto_blocks_db() + .get_shadow_tenure_start_block(&block.header.consensus_hash) + .unwrap() + .is_some()); + + // succeeds without burn + NakamotoChainState::validate_shadow_nakamoto_block_burnchain( + chainstate.nakamoto_blocks_db(), + &sortdb.index_handle_at_tip(), + None, + &block, + false, + 0x80000000, + ) + .unwrap(); + + // succeeds with expected burn + NakamotoChainState::validate_shadow_nakamoto_block_burnchain( + chainstate.nakamoto_blocks_db(), + &sortdb.index_handle_at_tip(), + Some(block.header.burn_spent), + &block, + false, + 0x80000000, + ) + .unwrap(); + + // fails with invalid burn + let ChainstateError::InvalidStacksBlock(_) = + NakamotoChainState::validate_shadow_nakamoto_block_burnchain( + chainstate.nakamoto_blocks_db(), + &sortdb.index_handle_at_tip(), + Some(block.header.burn_spent + 1), + &block, + false, + 0x80000000, + ) + .unwrap_err() + else { + panic!("validate_shadow_nakamoto_block_burnchain succeeded when it shouldn't have"); + }; + + // block must be stored already + let mut bad_block = block.clone(); + bad_block.header.version += 1; + + // fails because block_id() isn't present + let ChainstateError::InvalidStacksBlock(_) = + NakamotoChainState::validate_shadow_nakamoto_block_burnchain( + chainstate.nakamoto_blocks_db(), + &sortdb.index_handle_at_tip(), + None, + &bad_block, + false, + 0x80000000, + ) + .unwrap_err() + else { + panic!("validate_shadow_nakamoto_block_burnchain succeeded when it shouldn't have"); + }; + + // VRF proof must be present + assert!(NakamotoChainState::get_shadow_vrf_proof( + &mut chainstate.index_conn(), + &block.block_id() + ) + .unwrap() + .is_some()); + } else { + // not a shadow block + assert!(!chainstate + .nakamoto_blocks_db() + .has_shadow_nakamoto_block_with_index_hash(&block.block_id()) + .unwrap()); + assert!(!chainstate + .nakamoto_blocks_db() + .is_shadow_tenure(&block.header.consensus_hash) + .unwrap()); + assert!(chainstate + .nakamoto_blocks_db() + .get_shadow_tenure_start_block(&block.header.consensus_hash) + .unwrap() + .is_none()); + assert!(NakamotoChainState::get_shadow_vrf_proof( + &mut chainstate.index_conn(), + &block.block_id() + ) + .unwrap() + .is_none()); + } + } + + /// Add a shadow tenure on a given tip.
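+ /// Specifically, this helper will: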
+ /// * Advance the burnchain and create an empty sortition (so we have a new consensus hash) + /// * Generate a shadow block for the empty sortition + /// * Store the shadow block to the staging DB + /// * Process it + /// + /// Tests: + /// * NakamotoBlockHeader::get_shadow_signer_weight() + pub fn make_shadow_tenure(&mut self, tip: Option<StacksBlockId>) -> NakamotoBlock { + let naka_tip_id = tip.unwrap_or(self.network.stacks_tip.block_id()); + let (_, _, tenure_id_consensus_hash) = self.next_burnchain_block(vec![]); + + test_debug!( + "\n\nMake shadow tenure for tenure {} off of tip {}\n\n", + &tenure_id_consensus_hash, + &naka_tip_id + ); + + let mut stacks_node = self.stacks_node.take().unwrap(); + let sortdb = self.sortdb.take().unwrap(); + + let shadow_block = NakamotoBlockBuilder::make_shadow_tenure( + &mut stacks_node.chainstate, + &sortdb, + naka_tip_id, + tenure_id_consensus_hash, + vec![], + ) + .unwrap(); + + // Get the reward set + let sort_tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let reward_set = load_nakamoto_reward_set( + self.miner + .burnchain + .block_height_to_reward_cycle(sort_tip_sn.block_height) + .expect("FATAL: no reward cycle for sortition"), + &sort_tip_sn.sortition_id, + &self.miner.burnchain, + &mut stacks_node.chainstate, + &shadow_block.header.parent_block_id, + &sortdb, + &OnChainRewardSetProvider::new(), + ) + .expect("Failed to load reward set") + .expect("Expected a reward set") + .0 + .known_selected_anchor_block_owned() + .expect("Unknown reward set"); + + // check signer weight + let mut max_signing_weight = 0; + for signer in reward_set.signers.as_ref().unwrap().iter() { + max_signing_weight += signer.weight; + } + + assert_eq!( + shadow_block + .header + .get_shadow_signer_weight(&reward_set) + .unwrap(), + max_signing_weight + ); + + // put it into Stacks staging DB + let tx = stacks_node.chainstate.staging_db_tx_begin().unwrap(); + tx.add_shadow_block(&shadow_block).unwrap(); + + // inserts of the same block are idempotent + tx.add_shadow_block(&shadow_block).unwrap(); + + tx.commit().unwrap(); + + let rollback_tx = stacks_node.chainstate.staging_db_tx_begin().unwrap(); + + if let Some(normal_tenure) = rollback_tx.conn().get_any_normal_tenure().unwrap() { + // can't insert into a non-shadow tenure + let mut bad_shadow_block_tenure = shadow_block.clone(); + bad_shadow_block_tenure.header.consensus_hash = normal_tenure; + + let ChainstateError::InvalidStacksBlock(_) = rollback_tx + .add_shadow_block(&bad_shadow_block_tenure) + .unwrap_err() + else { + panic!("add_shadow_block succeeded when it should have failed"); + }; + } + + // can't insert into the same height twice with different blocks + let mut bad_shadow_block_height = shadow_block.clone(); + bad_shadow_block_height.header.version += 1; + let ChainstateError::InvalidStacksBlock(_) = rollback_tx + .add_shadow_block(&bad_shadow_block_height) + .unwrap_err() + else { + panic!("add_shadow_block succeeded when it should have failed"); + }; + + drop(rollback_tx); + + self.stacks_node = Some(stacks_node); + self.sortdb = Some(sortdb); + + // process it + self.coord.handle_new_nakamoto_stacks_block().unwrap(); + + // verify that it processed + self.refresh_burnchain_view(); + assert_eq!(self.network.stacks_tip.block_id(), shadow_block.block_id()); + + shadow_block } } diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index fb36021152..f6f167d75b 100644 +++
b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -1640,7 +1640,7 @@ pub mod test { pub fn instantiate_pox_peer_with_epoch<'a>( burnchain: &Burnchain, test_name: &str, - epochs: Option<Vec<StacksEpoch>>, + epochs: Option<EpochList>, observer: Option<&'a TestEventObserver>, ) -> (TestPeer<'a>, Vec<StacksPrivateKey>) { let mut peer_config = TestPeerConfig::new(test_name, 0, 0); diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 3134b4773a..dc65db0324 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -81,7 +81,7 @@ fn get_tip(sortdb: Option<&SortitionDB>) -> BlockSnapshot { SortitionDB::get_canonical_burn_chain_tip(&sortdb.unwrap().conn()).unwrap() } -fn make_test_epochs_pox() -> (Vec<StacksEpoch>, PoxConstants) { +fn make_test_epochs_pox() -> (EpochList, PoxConstants) { let EMPTY_SORTITIONS = 25; let EPOCH_2_1_HEIGHT = EMPTY_SORTITIONS + 11; // 36 let EPOCH_2_2_HEIGHT = EPOCH_2_1_HEIGHT + 14; // 50 @@ -92,7 +92,7 @@ fn make_test_epochs_pox() -> (EpochList, PoxConstants) { // cycle 11 = 60 - let epochs = vec![ + let epochs = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -142,7 +142,7 @@ fn make_test_epochs_pox() -> (EpochList, PoxConstants) { block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_4, }, - ]; + ]); let mut pox_constants = PoxConstants::mainnet_default(); pox_constants.reward_cycle_length = 5; @@ -279,7 +279,7 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 0); // produce blocks until immediately before the 2.1 epoch switch - while get_tip(peer.sortdb.as_ref()).block_height < epochs[3].start_height { + while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch21].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); // alice is still locked, balance should be 0 @@ -377,7 +377,7 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 512 * POX_THRESHOLD_STEPS_USTX); // now, let's roll the chain forward until just before Epoch-2.2 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[4].start_height { + while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should always include this half lockup let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -394,7 +394,8 @@ fn simple_pox_lockup_transition_pox_2() { assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); // now, roll the chain forward to Epoch-2.4 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should always be unlocked let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -612,7 +613,8 @@ fn pox_auto_unlock(alice_first: bool) { let mut coinbase_nonce = 0; // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[3].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch21].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -762,7 +764,8 @@ fn pox_auto_unlock(alice_first: bool) { // now, lets check behavior in Epochs 2.2-2.4, with pox-3 auto unlock tests // produce blocks until epoch 2.2 - while
get_tip(peer.sortdb.as_ref()).block_height <= epochs[4].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch22].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); assert_eq!(alice_balance, 0); @@ -774,7 +777,8 @@ fn pox_auto_unlock(alice_first: bool) { assert_eq!(alice_balance, 1024 * POX_THRESHOLD_STEPS_USTX); // produce blocks until epoch 2.4 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1051,7 +1055,8 @@ fn delegate_stack_increase() { let mut coinbase_nonce = 0; // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[3].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch21].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1236,7 +1241,7 @@ fn delegate_stack_increase() { // on pox-3 // roll the chain forward until just before Epoch-2.2 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[4].start_height { + while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should always include this half lockup assert_eq!( @@ -1279,7 +1284,8 @@ fn delegate_stack_increase() { ); // Roll to Epoch-2.4 and re-do the above tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1666,7 +1672,8 @@ fn stack_increase() { let increase_amt = total_balance - first_lockup_amt; // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[3].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch21].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -1799,7 +1806,7 @@ fn stack_increase() { // on pox-3 // roll the chain forward until just before Epoch-2.2 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[4].start_height { + while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should always include this half lockup assert_eq!( @@ -1828,7 +1835,8 @@ fn stack_increase() { ); // Roll to Epoch-2.4 and re-do the above stack-increase tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -2242,7 +2250,7 @@ fn pox_extend_transition() { } // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[3].start_height { + while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch21].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); alice_rewards_to_v2_start_checks(latest_block, &mut peer); } @@ -2311,7 +2319,7 @@ fn pox_extend_transition() { // Roll to Epoch-2.4 and re-do the above tests // roll the chain forward until just before Epoch-2.2 - while 
get_tip(peer.sortdb.as_ref()).block_height < epochs[4].start_height { + while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should be locked, and so should bob's let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -2338,7 +2346,8 @@ fn pox_extend_transition() { assert_eq!(bob_account.amount_unlocked(), INITIAL_BALANCE); // Roll to Epoch-2.4 and re-do the above stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -2611,7 +2620,8 @@ fn delegate_extend_pox_3() { let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // Roll to Epoch-2.4 and perform the delegate-stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -3087,7 +3097,8 @@ fn pox_3_getters() { let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // Roll to Epoch-2.4 and perform the delegate-stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -3491,10 +3502,12 @@ fn get_pox_addrs() { }; // produce blocks until epoch 2.2 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + if get_tip(peer.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height + { assert_latest_was_burn(&mut peer); } } @@ -3700,10 +3713,12 @@ fn stack_with_segwit() { }; // produce blocks until epoch 2.2 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + if get_tip(peer.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height + { assert_latest_was_burn(&mut peer); } } @@ -3882,7 +3897,8 @@ fn stack_aggregation_increase() { let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // Roll to Epoch-2.4 and perform the delegate-stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -4296,7 +4312,8 @@ fn pox_3_delegate_stx_addr_validation() { let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // Roll to Epoch-2.4 and perform the delegate-stack-extend tests - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[6].start_height { + while 
get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch24].start_height + { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index d1cceae7cf..14dc9e75ab 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -146,7 +146,7 @@ fn make_simple_pox_4_lock( ) } -pub fn make_test_epochs_pox(use_nakamoto: bool) -> (Vec<StacksEpoch>, PoxConstants) { +pub fn make_test_epochs_pox(use_nakamoto: bool) -> (EpochList, PoxConstants) { let EMPTY_SORTITIONS = 25; let EPOCH_2_1_HEIGHT = EMPTY_SORTITIONS + 11; // 36 let EPOCH_2_2_HEIGHT = EPOCH_2_1_HEIGHT + 14; // 50 @@ -157,7 +157,7 @@ pub fn make_test_epochs_pox(use_nakamoto: bool) -> (Vec<StacksEpoch>, PoxConstan let EPOCH_2_5_HEIGHT = EPOCH_2_4_HEIGHT + 44; // 100 let EPOCH_3_0_HEIGHT = EPOCH_2_5_HEIGHT + 23; // 123 - let mut epochs = vec![ + let mut epochs = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -220,7 +220,7 @@ pub fn make_test_epochs_pox(use_nakamoto: bool) -> (Vec<StacksEpoch>, PoxConstan block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_5, }, - ]; + ]); if use_nakamoto { epochs.push(StacksEpoch { @@ -455,7 +455,7 @@ fn pox_extend_transition() { } // produce blocks until epoch 2.1 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[3].start_height { + while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch21].start_height { peer.tenure_with_txs(&[], &mut coinbase_nonce); alice_rewards_to_v2_start_checks(latest_block, &mut peer); } @@ -522,7 +522,7 @@ fn pox_extend_transition() { v2_rewards_checks(latest_block, &mut peer); // roll the chain forward until just before Epoch-2.2 - while get_tip(peer.sortdb.as_ref()).block_height < epochs[4].start_height { + while get_tip(peer.sortdb.as_ref()).block_height < epochs[StacksEpochId::Epoch22].start_height { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // at this point, alice's balance should be locked, and so should bob's let alice_balance = get_balance(&mut peer, &key_to_stacks_addr(&alice).into()); @@ -936,7 +936,8 @@ fn pox_lock_unlock() { while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + if get_tip(peer.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height + { assert_latest_was_burn(&mut peer); } } @@ -1116,7 +1117,8 @@ fn pox_3_defunct() { while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + if get_tip(peer.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height + { assert_latest_was_burn(&mut peer); } } @@ -1245,7 +1247,8 @@ fn pox_3_unlocks() { while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + if get_tip(peer.sortdb.as_ref()).block_height > epochs[StacksEpochId::Epoch21].start_height + { assert_latest_was_burn(&mut peer); } } @@
-4334,7 +4337,7 @@ fn stack_agg_increase() { peer_config.burnchain.pox_constants.reward_cycle_length = 20; peer_config.burnchain.pox_constants.prepare_length = 5; let epochs = peer_config.epochs.clone().unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3 = &epochs[StacksEpochId::Epoch30]; let mut peer = TestPeer::new_with_observer(peer_config, Some(&observer)); let mut peer_nonce = 0; @@ -8882,7 +8885,9 @@ pub fn prepare_pox4_test<'a>( while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + if get_tip(peer.sortdb.as_ref()).block_height + > epochs[StacksEpochId::Epoch21].start_height + { assert_latest_was_burn(&mut peer); } } @@ -8981,7 +8986,8 @@ fn missed_slots_no_unlock() { let EMPTY_SORTITIONS = 25; let (epochs, mut pox_constants) = make_test_epochs_pox(false); - pox_constants.pox_4_activation_height = u32::try_from(epochs[7].start_height).unwrap() + 1; + pox_constants.pox_4_activation_height = + u32::try_from(epochs[StacksEpochId::Epoch25].start_height).unwrap() + 1; let mut burnchain = Burnchain::default_unittest( 0, @@ -9013,7 +9019,8 @@ fn missed_slots_no_unlock() { + 1; // produce blocks until epoch 2.5 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[7].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch25].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); } @@ -9232,7 +9239,8 @@ fn no_lockups_2_5() { let EMPTY_SORTITIONS = 25; let (epochs, mut pox_constants) = make_test_epochs_pox(false); - pox_constants.pox_4_activation_height = u32::try_from(epochs[7].start_height).unwrap() + 1; + pox_constants.pox_4_activation_height = + u32::try_from(epochs[StacksEpochId::Epoch25].start_height).unwrap() + 1; let mut burnchain = Burnchain::default_unittest( 0, @@ -9264,7 +9272,8 @@ fn no_lockups_2_5() { + 1; // produce blocks until epoch 2.5 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[7].start_height { + while get_tip(peer.sortdb.as_ref()).block_height <= epochs[StacksEpochId::Epoch25].start_height + { peer.tenure_with_txs(&[], &mut coinbase_nonce); } diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index 7c81410e87..b05365d5ac 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -856,6 +856,14 @@ impl StacksChainState { burn_total ); + // in the case of shadow blocks, there will be zero burns. + // the coinbase is still generated, but it's rendered unspendable + let (this_burn_total, burn_total) = if burn_total == 0 { + (1, 1) + } else { + (this_burn_total, burn_total) + }; + // each participant gets a share of the coinbase proportional to the fraction it burned out // of all participants' burns. 
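The guard above exists because each participant's coinbase share is computed as a fraction of the total burn for the block, and a shadow block commits no burns at all; without the (1, 1) remap, the proportional split would divide by zero. A minimal sketch of the arithmetic follows, using a hypothetical helper name (participant_share is illustrative, not the actual chainstate function), before the coinbase_reward computation resumes below.

    // Illustrative distillation of the zero-burn guard above. Shadow blocks
    // have burn_total == 0, so (this_burn, burn_total) is remapped to (1, 1):
    // the division stays defined and the full coinbase is still generated,
    // which the shadow-block rules then render unspendable.
    fn participant_share(coinbase: u128, this_burn: u64, burn_total: u64) -> u128 {
        let (this_burn, burn_total) = if burn_total == 0 {
            (1, 1)
        } else {
            (this_burn, burn_total)
        };
        (coinbase * u128::from(this_burn)) / u128::from(burn_total)
    }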
let coinbase_reward = participant diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 115678ada8..04f772da02 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -11048,8 +11048,8 @@ pub mod test { init_balances.push((addr.to_account_principal(), initial_balance)); peer_config.initial_balances = init_balances; let mut epochs = StacksEpoch::unit_test_2_1(0); - let num_epochs = epochs.len(); - epochs[num_epochs - 1].block_limit.runtime = 10_000_000; + let last_epoch = epochs.last_mut().unwrap(); + last_epoch.block_limit.runtime = 10_000_000; peer_config.epochs = Some(epochs); peer_config.burnchain.pox_constants.v1_unlock_height = 26; let burnchain = peer_config.burnchain.clone(); @@ -11373,9 +11373,9 @@ pub mod test { init_balances.push((addr.to_account_principal(), initial_balance)); peer_config.initial_balances = init_balances; let mut epochs = StacksEpoch::unit_test_2_1(0); - let num_epochs = epochs.len(); - epochs[num_epochs - 1].block_limit.runtime = 10_000_000; - epochs[num_epochs - 1].block_limit.read_length = 10_000_000; + let last_epoch = epochs.last_mut().unwrap(); + last_epoch.block_limit.runtime = 10_000_000; + last_epoch.block_limit.read_length = 10_000_000; peer_config.epochs = Some(epochs); peer_config.burnchain.pox_constants.v1_unlock_height = 26; let burnchain = peer_config.burnchain.clone(); diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 160e2dc60e..6b6f523f88 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -55,7 +55,7 @@ use crate::chainstate::burn::{ConsensusHash, ConsensusHashExtensions}; use crate::chainstate::nakamoto::{ HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConn, NAKAMOTO_CHAINSTATE_SCHEMA_1, NAKAMOTO_CHAINSTATE_SCHEMA_2, - NAKAMOTO_CHAINSTATE_SCHEMA_3, NAKAMOTO_CHAINSTATE_SCHEMA_4, + NAKAMOTO_CHAINSTATE_SCHEMA_3, NAKAMOTO_CHAINSTATE_SCHEMA_4, NAKAMOTO_CHAINSTATE_SCHEMA_5, }; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::boot::*; @@ -299,14 +299,14 @@ impl DBConfig { }); match epoch_id { StacksEpochId::Epoch10 => true, - StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 7, - StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 7, - StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 7, - StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 7, + StacksEpochId::Epoch20 => version_u32 >= 1 && version_u32 <= 8, + StacksEpochId::Epoch2_05 => version_u32 >= 2 && version_u32 <= 8, + StacksEpochId::Epoch21 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch22 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch23 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch24 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch25 => version_u32 >= 3 && version_u32 <= 8, + StacksEpochId::Epoch30 => version_u32 >= 3 && version_u32 <= 8, } } } @@ -680,7 +680,7 @@ impl<'a> DerefMut for ChainstateTx<'a> { } } -pub const CHAINSTATE_VERSION: &'static str = "7"; +pub const CHAINSTATE_VERSION: &'static str = "8"; const 
CHAINSTATE_INITIAL_SCHEMA: &'static [&'static str] = &[ "PRAGMA foreign_keys = ON;", @@ -1087,28 +1087,24 @@ impl StacksChainState { while db_config.version != CHAINSTATE_VERSION { match db_config.version.as_str() { "1" => { - // migrate to 2 info!("Migrating chainstate schema from version 1 to 2"); for cmd in CHAINSTATE_SCHEMA_2.iter() { tx.execute_batch(cmd)?; } } "2" => { - // migrate to 3 info!("Migrating chainstate schema from version 2 to 3"); for cmd in CHAINSTATE_SCHEMA_3.iter() { tx.execute_batch(cmd)?; } } "3" => { - // migrate to nakamoto 1 info!("Migrating chainstate schema from version 3 to 4: nakamoto support"); for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_1.iter() { tx.execute_batch(cmd)?; } } "4" => { - // migrate to nakamoto 2 info!( "Migrating chainstate schema from version 4 to 5: fix nakamoto tenure typo" ); @@ -1117,14 +1113,12 @@ impl StacksChainState { } } "5" => { - // migrate to nakamoto 3 info!("Migrating chainstate schema from version 5 to 6: adds height_in_tenure field"); for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_3.iter() { tx.execute_batch(cmd)?; } } "6" => { - // migrate to nakamoto 3 info!( "Migrating chainstate schema from version 6 to 7: adds signer_stats table" ); @@ -1132,6 +1126,14 @@ impl StacksChainState { tx.execute_batch(cmd)?; } } + "7" => { + info!( + "Migrating chainstate schema from version 7 to 8: add index for nakamoto block headers" + ); + for cmd in NAKAMOTO_CHAINSTATE_SCHEMA_5.iter() { + tx.execute_batch(cmd)?; + } + } _ => { error!( "Invalid chain state database: expected version = {}, got {}", diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 35ba532667..e9de9139a2 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -578,6 +578,7 @@ impl StacksChainState { post_condition_mode: &TransactionPostConditionMode, origin_account: &StacksAccount, asset_map: &AssetMap, + txid: Txid, ) -> Result<bool, Error> { let mut checked_fungible_assets: HashMap<PrincipalData, HashSet<AssetIdentifier>> = HashMap::new(); @@ -606,7 +607,7 @@ impl StacksChainState { if !condition_code.check(u128::from(*amount_sent_condition), amount_sent) { info!( "Post-condition check failure on STX owned by {}: {:?} {:?} {}", - account_principal, amount_sent_condition, condition_code, amount_sent + account_principal, amount_sent_condition, condition_code, amount_sent; "txid" => %txid ); return Ok(false); } @@ -650,7 +651,7 @@ impl StacksChainState { .get_fungible_tokens(&account_principal, &asset_id) .unwrap_or(0); if !condition_code.check(u128::from(*amount_sent_condition), amount_sent) { - info!("Post-condition check failure on fungible asset {} owned by {}: {} {:?} {}", &asset_id, account_principal, amount_sent_condition, condition_code, amount_sent); + info!("Post-condition check failure on fungible asset {} owned by {}: {} {:?} {}", &asset_id, account_principal, amount_sent_condition, condition_code, amount_sent; "txid" => %txid); return Ok(false); } @@ -684,7 +685,7 @@ impl StacksChainState { .get_nonfungible_tokens(&account_principal, &asset_id) .unwrap_or(&empty_assets); if !condition_code.check(asset_value, assets_sent) { - info!("Post-condition check failure on non-fungible asset {} owned by {}: {:?} {:?}", &asset_id, account_principal, &asset_value, condition_code); + info!("Post-condition check failure on non-fungible asset {} owned by {}: {:?} {:?}", &asset_id, account_principal, &asset_value, condition_code; "txid" => %txid); return Ok(false); } @@ -726,18 +727,18 @@ impl
StacksChainState { // each value must be covered for v in values { if !nfts.contains(&v.clone().try_into()?) { - info!("Post-condition check failure: Non-fungible asset {} value {:?} was moved by {} but not checked", &asset_identifier, &v, &principal); + info!("Post-condition check failure: Non-fungible asset {} value {:?} was moved by {} but not checked", &asset_identifier, &v, &principal; "txid" => %txid); return Ok(false); } } } else { // no values covered - info!("Post-condition check failure: No checks for non-fungible asset type {} moved by {}", &asset_identifier, &principal); + info!("Post-condition check failure: No checks for non-fungible asset type {} moved by {}", &asset_identifier, &principal; "txid" => %txid); return Ok(false); } } else { // no NFT for this principal - info!("Post-condition check failure: No checks for any non-fungible assets, but moved {} by {}", &asset_identifier, &principal); + info!("Post-condition check failure: No checks for any non-fungible assets, but moved {} by {}", &asset_identifier, &principal; "txid" => %txid); return Ok(false); } } @@ -747,11 +748,11 @@ impl StacksChainState { checked_fungible_assets.get(&principal) { if !checked_ft_asset_ids.contains(&asset_identifier) { - info!("Post-condition check failure: checks did not cover transfer of {} by {}", &asset_identifier, &principal); + info!("Post-condition check failure: checks did not cover transfer of {} by {}", &asset_identifier, &principal; "txid" => %txid); return Ok(false); } } else { - info!("Post-condition check failure: No checks for fungible token type {} moved by {}", &asset_identifier, &principal); + info!("Post-condition check failure: No checks for fungible token type {} moved by {}", &asset_identifier, &principal; "txid" => %txid); return Ok(false); } } @@ -980,14 +981,14 @@ impl StacksChainState { // Their presence in this variant makes the transaction invalid. 
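Every post-condition failure path above now logs a structured "txid" field; the `; "txid" => %txid` tail is the slog-style key/value form used by stacks-core's info! macro, where % formats the value with Display. The corresponding call-site change appears in the hunks that follow, and in essence looks like this (argument names taken from those hunks, surrounding code elided):

    // Callers of the post-condition checker now pass the transaction id by
    // value, so every rejection log can name the offending transaction.
    let passed = StacksChainState::check_transaction_postconditions(
        &tx.post_conditions,
        &tx.post_condition_mode,
        origin_account,
        asset_map,
        tx.txid(), // the newly threaded-through parameter
    )
    .expect("FATAL: error while evaluating post-conditions");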
if tx.post_conditions.len() > 0 { let msg = format!("Invalid Stacks transaction: TokenTransfer transactions do not support post-conditions"); - info!("{}", &msg); + info!("{}", &msg; "txid" => %tx.txid()); return Err(Error::InvalidStacksTransaction(msg, false)); } if *addr == origin_account.principal { let msg = format!("Invalid TokenTransfer: address tried to send to itself"); - info!("{}", &msg); + info!("{}", &msg; "txid" => %tx.txid()); return Err(Error::InvalidStacksTransaction(msg, false)); } @@ -1039,6 +1040,7 @@ impl StacksChainState { &tx.post_condition_mode, origin_account, asset_map, + tx.txid(), ) .expect("FATAL: error while evaluating post-conditions") }, @@ -1274,6 +1276,7 @@ impl StacksChainState { &tx.post_condition_mode, origin_account, asset_map, + tx.txid(), ) .expect("FATAL: error while evaluating post-conditions") }, @@ -6873,6 +6876,7 @@ pub mod test { mode, origin, &ft_transfer_2, + Txid([0; 32]), ) .unwrap(); if result != expected_result { @@ -7226,6 +7230,7 @@ pub mod test { mode, origin, &nft_transfer_2, + Txid([0; 32]), ) .unwrap(); if result != expected_result { @@ -8043,6 +8048,7 @@ pub mod test { post_condition_mode, origin_account, asset_map, + Txid([0; 32]), ) .unwrap(); if result != expected_result { diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index 52afaceb66..7da2ff1599 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -1159,13 +1159,13 @@ mod test { let initial_balance = 1000000000; let mut peer_config = TestPeerConfig::new(function_name!(), 7004, 7005); peer_config.initial_balances = vec![(addr.to_account_principal(), initial_balance)]; - peer_config.epochs = Some(vec![StacksEpoch { + peer_config.epochs = Some(EpochList::new(&[StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, end_height: (i64::MAX) as u64, block_limit: BLOCK_LIMIT_MAINNET_20, network_epoch: PEER_VERSION_EPOCH_2_0, - }]); + }])); let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 7fb08335a2..7a72cc1652 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -287,6 +287,8 @@ pub struct TransactionSuccess { /// The fee that was charged to the user for doing this transaction. pub fee: u64, pub receipt: StacksTransactionReceipt, + /// Whether the soft limit was reached after this transaction was processed. + pub soft_limit_reached: bool, } /// Represents a failed transaction. Something went wrong when processing this transaction. @@ -319,6 +321,7 @@ pub struct TransactionSuccessEvent { pub fee: u64, pub execution_cost: ExecutionCost, pub result: Value, + pub soft_limit_reached: bool, } /// Represents an event for a failed transaction. Something went wrong when processing this transaction. @@ -448,6 +451,24 @@ impl TransactionResult { tx: transaction.clone(), fee, receipt, + soft_limit_reached: false, + }) + } + + /// Creates a `TransactionResult` backed by `TransactionSuccess` with a soft limit reached. + /// This method logs "transaction success" as a side effect. 
+ pub fn success_with_soft_limit( + transaction: &StacksTransaction, + fee: u64, + receipt: StacksTransactionReceipt, + soft_limit_reached: bool, + ) -> TransactionResult { + Self::log_transaction_success(transaction); + Self::Success(TransactionSuccess { + tx: transaction.clone(), + fee, + receipt, + soft_limit_reached, }) } @@ -499,14 +520,18 @@ impl TransactionResult { pub fn convert_to_event(&self) -> TransactionEvent { match &self { - TransactionResult::Success(TransactionSuccess { tx, fee, receipt }) => { - TransactionEvent::Success(TransactionSuccessEvent { - txid: tx.txid(), - fee: *fee, - execution_cost: receipt.execution_cost.clone(), - result: receipt.result.clone(), - }) - } + TransactionResult::Success(TransactionSuccess { + tx, + fee, + receipt, + soft_limit_reached, + }) => TransactionEvent::Success(TransactionSuccessEvent { + txid: tx.txid(), + fee: *fee, + execution_cost: receipt.execution_cost.clone(), + result: receipt.result.clone(), + soft_limit_reached: *soft_limit_reached, + }), TransactionResult::ProcessingError(TransactionError { tx, error }) => { TransactionEvent::ProcessingError(TransactionErrorEvent { txid: tx.txid(), @@ -540,11 +565,7 @@ impl TransactionResult { /// Otherwise crashes. pub fn unwrap(self) -> (u64, StacksTransactionReceipt) { match self { - TransactionResult::Success(TransactionSuccess { - tx: _, - fee, - receipt, - }) => (fee, receipt), + TransactionResult::Success(TransactionSuccess { fee, receipt, .. }) => (fee, receipt), _ => panic!("Tried to `unwrap` a non-success result."), } } @@ -585,7 +606,7 @@ impl TransactionResult { // recover original ClarityError ClarityRuntimeTxError::Acceptable { error, .. } => { if let clarity_error::Parse(ref parse_err) = error { - info!("Parse error: {}", parse_err); + info!("Parse error: {}", parse_err; "txid" => %tx.txid()); match &parse_err.err { ParseErrors::ExpressionStackDepthTooDeep | ParseErrors::VaryExpressionStackDepthTooDeep => { @@ -2366,7 +2387,12 @@ impl StacksBlockBuilder { let result_event = tx_result.convert_to_event(); match tx_result { - TransactionResult::Success(TransactionSuccess { receipt, .. }) => { + TransactionResult::Success(TransactionSuccess { + tx: _, + fee: _, + receipt, + soft_limit_reached, + }) => { if txinfo.metadata.time_estimate_ms.is_none() { // use i64 to avoid running into issues when storing in // rusqlite. @@ -2404,6 +2430,18 @@ impl StacksBlockBuilder { { mined_sponsor_nonces.insert(sponsor_addr, sponsor_nonce); } + if soft_limit_reached { + // done mining -- our soft limit execution budget is exceeded. + // Make the block from the transactions we did manage to get + debug!( + "Soft block budget exceeded on tx {}", + &txinfo.tx.txid() + ); + if block_limit_hit != BlockLimitFunction::CONTRACT_LIMIT_HIT { + debug!("Switch to mining stx-transfers only"); + block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; + } + } } TransactionResult::Skipped(TransactionSkipped { error, .. 
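The soft_limit_reached flag threaded through TransactionSuccess is what lets the miner loop later in this hunk flip block_limit_hit to CONTRACT_LIMIT_HIT and finish the block with STX transfers only. A sketch of how a per-block soft budget could be derived from the tenure_cost_limit_per_block_percentage setting appears below; it collapses the multi-dimensional ExecutionCost into a single number for brevity and is illustrative, not the exact miner code:

    // Illustrative only: reserve a percentage of the remaining tenure
    // execution budget as this block's soft limit; None means no soft cap.
    fn soft_block_budget(remaining: u64, pct: Option<u8>) -> u64 {
        match pct {
            // e.g. pct = 25 lets one block consume at most a quarter of
            // whatever tenure budget is still unspent.
            Some(p) => (u128::from(remaining) * u128::from(p) / 100) as u64,
            None => remaining,
        }
    }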
}) | TransactionResult::ProcessingError(TransactionError { diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 8af9cf6ec7..fd370a8b12 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1101,13 +1101,12 @@ pub const MAX_MICROBLOCK_SIZE: u32 = 65536; #[cfg(test)] pub mod test { - use clarity::util::get_epoch_time_secs; use clarity::vm::representations::{ClarityName, ContractName}; use clarity::vm::ClarityVersion; use stacks_common::bitvec::BitVec; use stacks_common::util::hash::*; - use stacks_common::util::log; use stacks_common::util::secp256k1::Secp256k1PrivateKey; + use stacks_common::util::{get_epoch_time_secs, log}; use super::*; use crate::chainstate::burn::BlockSnapshot; diff --git a/stackslib/src/chainstate/stacks/tests/accounting.rs b/stackslib/src/chainstate/stacks/tests/accounting.rs index 9033803325..9ca3016a1b 100644 --- a/stackslib/src/chainstate/stacks/tests/accounting.rs +++ b/stackslib/src/chainstate/stacks/tests/accounting.rs @@ -91,7 +91,7 @@ fn test_bad_microblock_fees_pre_v210() { (addr_anchored.to_account_principal(), 1000000000), ]; - let epochs = vec![ + let epochs = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -119,7 +119,7 @@ fn test_bad_microblock_fees_pre_v210() { }, network_epoch: PEER_VERSION_EPOCH_2_05, }, - ]; + ]); peer_config.epochs = Some(epochs); let burnchain = peer_config.burnchain.clone(); @@ -408,7 +408,7 @@ fn test_bad_microblock_fees_fix_transition() { ]; let burnchain = peer_config.burnchain.clone(); - let epochs = vec![ + let epochs = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -443,7 +443,7 @@ fn test_bad_microblock_fees_fix_transition() { }, network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]; + ]); peer_config.epochs = Some(epochs); let num_blocks = 10; @@ -765,7 +765,7 @@ fn test_get_block_info_v210() { ]; let burnchain = peer_config.burnchain.clone(); - let epochs = vec![ + let epochs = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -800,7 +800,7 @@ fn test_get_block_info_v210() { }, network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]; + ]); peer_config.epochs = Some(epochs); let num_blocks = 10; @@ -1137,7 +1137,7 @@ fn test_get_block_info_v210_no_microblocks() { ]; let burnchain = peer_config.burnchain.clone(); - let epochs = vec![ + let epochs = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -1172,7 +1172,7 @@ fn test_get_block_info_v210_no_microblocks() { }, network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]; + ]); peer_config.epochs = Some(epochs); let num_blocks = 10; @@ -1457,7 +1457,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { (addr_anchored.to_account_principal(), 1000000000), ]; - let epochs = vec![ + let epochs = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -1492,7 +1492,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { }, network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]; + ]); peer_config.epochs = Some(epochs); let burnchain = peer_config.burnchain.clone(); diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 352679c209..7b7720b996 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -614,7 +614,7 @@ fn 
test_build_anchored_blocks_connected_by_microblocks_across_epoch() { peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; let burnchain = peer_config.burnchain.clone(); - let epochs = vec![ + let epochs = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -642,7 +642,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { }, network_epoch: PEER_VERSION_EPOCH_2_05, }, - ]; + ]); peer_config.epochs = Some(epochs); let num_blocks = 10; @@ -850,7 +850,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; let burnchain = peer_config.burnchain.clone(); - let epochs = vec![ + let epochs = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -878,7 +878,7 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { }, network_epoch: PEER_VERSION_EPOCH_2_05, }, - ]; + ]); peer_config.epochs = Some(epochs); let num_blocks = 10; @@ -1344,7 +1344,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { let mut peer_config = TestPeerConfig::new(function_name!(), 2006, 2007); peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(vec![StacksEpoch { + peer_config.epochs = Some(EpochList::new(&[StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, end_height: i64::MAX as u64, @@ -1358,7 +1358,7 @@ fn test_build_anchored_blocks_skip_too_expensive() { runtime: 3350, }, network_epoch: PEER_VERSION_EPOCH_2_0, - }]); + }])); let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -3459,7 +3459,7 @@ fn test_contract_call_across_clarity_versions() { ]; let burnchain = peer_config.burnchain.clone(); - let epochs = vec![ + let epochs = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -3488,7 +3488,7 @@ fn test_contract_call_across_clarity_versions() { block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]; + ]); peer_config.epochs = Some(epochs); let num_blocks = 10; @@ -4033,7 +4033,7 @@ fn test_is_tx_problematic() { let mut peer_config = TestPeerConfig::new(function_name!(), 2018, 2019); peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(vec![ + peer_config.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -4048,7 +4048,7 @@ fn test_is_tx_problematic() { block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_05, }, - ]); + ])); let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -4506,7 +4506,7 @@ fn mempool_incorporate_pox_unlocks() { let mut peer_config = TestPeerConfig::new(function_name!(), 2020, 2021); peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(vec![ + peer_config.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -4528,9 +4528,9 @@ fn mempool_incorporate_pox_unlocks() { block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]); + ])); peer_config.burnchain.pox_constants.v1_unlock_height = - peer_config.epochs.as_ref().unwrap()[1].end_height as u32 + 1; + peer_config.epochs.as_ref().unwrap()[StacksEpochId::Epoch2_05].end_height as u32 + 1; let pox_constants = peer_config.burnchain.pox_constants.clone(); let burnchain = peer_config.burnchain.clone(); diff --git 
a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 8b66c019f0..9a6a84507e 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -81,7 +81,7 @@ pub fn path_join(dir: &str, path: &str) -> String { // copy src to dest pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { - eprintln!("Copy directory {} to {}", src_dir, dest_dir); + eprintln!("Copy directory {src_dir} to {dest_dir}"); let mut dir_queue = VecDeque::new(); dir_queue.push_back("/".to_string()); @@ -91,7 +91,7 @@ pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { let next_src_dir = path_join(&src_dir, &next_dir); let next_dest_dir = path_join(&dest_dir, &next_dir); - eprintln!("mkdir {}", &next_dest_dir); + eprintln!("mkdir {next_dest_dir}"); fs::create_dir_all(&next_dest_dir)?; for dirent_res in fs::read_dir(&next_src_dir)? { @@ -100,11 +100,11 @@ pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { let md = fs::metadata(&path)?; if md.is_dir() { let frontier = path_join(&next_dir, &dirent.file_name().to_str().unwrap()); - eprintln!("push {}", &frontier); + eprintln!("push {frontier}"); dir_queue.push_back(frontier); } else { let dest_path = path_join(&next_dest_dir, &dirent.file_name().to_str().unwrap()); - eprintln!("copy {} to {}", &path.to_str().unwrap(), &dest_path); + eprintln!("copy {} to {dest_path}", &path.to_str().unwrap()); fs::copy(path, dest_path)?; } } @@ -475,14 +475,14 @@ impl TestStacksNode { }; if StacksChainState::has_stored_block( - &self.chainstate.db(), + self.chainstate.db(), &self.chainstate.blocks_path, &consensus_hash, &bc.block_header_hash, ) .unwrap() && !StacksChainState::is_block_orphaned( - &self.chainstate.db(), + self.chainstate.db(), &consensus_hash, &bc.block_header_hash, ) @@ -583,11 +583,10 @@ impl TestStacksNode { ); test_debug!( - "Miner {}: Block commit transaction builds on {},{} (parent snapshot is {:?})", + "Miner {}: Block commit transaction builds on {},{} (parent snapshot is {parent_block_snapshot_opt:?})", miner.id, block_commit_op.parent_block_ptr, - block_commit_op.parent_vtxindex, - &parent_block_snapshot_opt + block_commit_op.parent_vtxindex ); self.commit_ops.insert( block_commit_op.block_header_hash.clone(), @@ -767,16 +766,15 @@ pub fn preprocess_stacks_block_data( { Some(sn) => sn, None => { - test_debug!("Block commit did not win sorition: {:?}", block_commit_op); + test_debug!("Block commit did not win sorition: {block_commit_op:?}"); return None; } }; // "discover" this stacks block test_debug!( - "\n\nPreprocess Stacks block {}/{} ({})", + "\n\nPreprocess Stacks block {}/{block_hash} ({})", &commit_snapshot.consensus_hash, - &block_hash, StacksBlockHeader::make_index_block_hash(&commit_snapshot.consensus_hash, &block_hash) ); let block_res = node @@ -793,8 +791,7 @@ pub fn preprocess_stacks_block_data( // "discover" this stacks microblock stream for mblock in stacks_microblocks.iter() { test_debug!( - "Preprocess Stacks microblock {}-{} (seq {})", - &block_hash, + "Preprocess Stacks microblock {block_hash}-{} (seq {})", mblock.block_hash(), mblock.header.sequence ); @@ -828,11 +825,9 @@ pub fn check_block_state_index_root( .read_block_root_hash(&index_block_hash) .unwrap(); test_debug!( - "checking {}/{} state root: expecting {}, got {}", - consensus_hash, + "checking {consensus_hash}/{} state root: expecting {}, got {state_root}", &stacks_header.block_hash(), - &stacks_header.state_index_root, - 
&state_root + &stacks_header.state_index_root ); state_root == stacks_header.state_index_root } @@ -888,9 +883,8 @@ pub fn check_mining_reward( let mut total: u128 = 10_000_000_000 - spent_total; test_debug!( - "Miner {} has spent {} in total so far", - &miner.origin_address().unwrap(), - spent_total + "Miner {} has spent {spent_total} in total so far", + &miner.origin_address().unwrap() ); if block_height >= MINER_REWARD_MATURITY { @@ -908,13 +902,10 @@ pub fn check_mining_reward( let reward = recipient.coinbase + anchored + (3 * streamed / 5); test_debug!( - "Miner {} received a reward {} = {} + {} + {} at block {}", + "Miner {} received a reward {reward} = {} + {anchored} + {} at block {i}", &recipient.address.to_string(), - reward, recipient.coinbase, - anchored, (3 * streamed / 5), - i ); total += reward; found = true; @@ -922,9 +913,8 @@ pub fn check_mining_reward( } if !found { test_debug!( - "Miner {} received no reward at block {}", - miner.origin_address().unwrap(), - i + "Miner {} received no reward at block {i}", + miner.origin_address().unwrap() ); } } @@ -945,11 +935,9 @@ pub fn check_mining_reward( &parent_reward.block_hash, ); test_debug!( - "Miner {} received a produced-stream reward {} from {} confirmed at {}", + "Miner {} received a produced-stream reward {parent_streamed} from {} confirmed at {confirmed_block_height}", miner.origin_address().unwrap().to_string(), - parent_streamed, - heights.get(&parent_ibh).unwrap(), - confirmed_block_height + heights.get(&parent_ibh).unwrap() ); total += parent_streamed; } @@ -967,7 +955,7 @@ pub fn check_mining_reward( return total == 0; } else { if amount != total { - test_debug!("Amount {} != {}", amount, total); + test_debug!("Amount {amount} != {total}"); return false; } return true; @@ -1091,16 +1079,14 @@ pub fn make_smart_contract_with_version( (begin (var-set bar (/ x y)) (ok (var-get bar))))"; test_debug!( - "Make smart contract block at hello-world-{}-{}", - burnchain_height, - stacks_block_height + "Make smart contract block at hello-world-{burnchain_height}-{stacks_block_height}" ); let mut tx_contract = StacksTransaction::new( TransactionVersion::Testnet, miner.as_transaction_auth().unwrap(), TransactionPayload::new_smart_contract( - &format!("hello-world-{}-{}", burnchain_height, stacks_block_height), + &format!("hello-world-{burnchain_height}-{stacks_block_height}"), &contract.to_string(), version, ) @@ -1140,7 +1126,7 @@ pub fn make_contract_call( miner.as_transaction_auth().unwrap(), TransactionPayload::new_contract_call( addr.clone(), - &format!("hello-world-{}-{}", burnchain_height, stacks_block_height), + &format!("hello-world-{burnchain_height}-{stacks_block_height}"), "set-bar", vec![Value::Int(arg1), Value::Int(arg2)], ) diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 9ff6e55644..f703f8a367 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -47,13 +47,14 @@ use crate::util_lib::db::IndexDBTx; /// Can be used with CLI commands to support non-mainnet chainstate /// Allows integration testing of these functions +#[derive(Deserialize)] pub struct StacksChainConfig { pub chain_id: u32, pub first_block_height: u64, pub first_burn_header_hash: BurnchainHeaderHash, pub first_burn_header_timestamp: u64, pub pox_constants: PoxConstants, - pub epochs: Vec, + pub epochs: EpochList, } impl StacksChainConfig { @@ -65,7 +66,45 @@ impl StacksChainConfig { .unwrap(), first_burn_header_timestamp: BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP.into(), pox_constants: PoxConstants::mainnet_default(), - epochs: 
STACKS_EPOCHS_MAINNET.to_vec(), + epochs: (*STACKS_EPOCHS_MAINNET).clone(), + } + } + + pub fn default_testnet() -> Self { + let mut pox_constants = PoxConstants::regtest_default(); + pox_constants.prepare_length = 100; + pox_constants.reward_cycle_length = 900; + pox_constants.v1_unlock_height = 3; + pox_constants.v2_unlock_height = 5; + pox_constants.pox_3_activation_height = 5; + pox_constants.pox_4_activation_height = 6; + pox_constants.v3_unlock_height = 7; + let mut epochs = EpochList::new(&*STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch10].start_height = 0; + epochs[StacksEpochId::Epoch10].end_height = 0; + epochs[StacksEpochId::Epoch20].start_height = 0; + epochs[StacksEpochId::Epoch20].end_height = 1; + epochs[StacksEpochId::Epoch2_05].start_height = 1; + epochs[StacksEpochId::Epoch2_05].end_height = 2; + epochs[StacksEpochId::Epoch21].start_height = 2; + epochs[StacksEpochId::Epoch21].end_height = 3; + epochs[StacksEpochId::Epoch22].start_height = 3; + epochs[StacksEpochId::Epoch22].end_height = 4; + epochs[StacksEpochId::Epoch23].start_height = 4; + epochs[StacksEpochId::Epoch23].end_height = 5; + epochs[StacksEpochId::Epoch24].start_height = 5; + epochs[StacksEpochId::Epoch24].end_height = 6; + epochs[StacksEpochId::Epoch25].start_height = 6; + epochs[StacksEpochId::Epoch25].end_height = 56_457; + epochs[StacksEpochId::Epoch30].start_height = 56_457; + Self { + chain_id: CHAIN_ID_TESTNET, + first_block_height: 0, + first_burn_header_hash: BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH) + .unwrap(), + first_burn_header_timestamp: BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP.into(), + pox_constants, + epochs, } } } @@ -151,6 +190,91 @@ pub fn command_replay_block(argv: &[String], conf: Option<&StacksChainConfig>) { println!("Finished. run_time_seconds = {}", start.elapsed().as_secs()); } +/// Replay blocks from chainstate database +/// Terminates on error using `process::exit()` +/// +/// Arguments: +/// - `argv`: Args in CLI format: ` [args...]` +pub fn command_replay_block_nakamoto(argv: &[String], conf: Option<&StacksChainConfig>) { + let print_help_and_exit = || -> ! 
{ + let n = &argv[0]; + eprintln!("Usage:"); + eprintln!(" {n} <db-path>"); + eprintln!(" {n} <db-path> prefix <index-block-hash-prefix>"); + eprintln!(" {n} <db-path> index-range <start-block> <end-block>"); + eprintln!(" {n} <db-path> range <start-block> <end-block>"); + eprintln!(" {n} <db-path> <first|last> <block-count>"); + process::exit(1); + }; + let start = Instant::now(); + let db_path = argv.get(1).unwrap_or_else(|| print_help_and_exit()); + let mode = argv.get(2).map(String::as_str); + + let chain_state_path = format!("{db_path}/chainstate/"); + + let default_conf = STACKS_CHAIN_CONFIG_DEFAULT_MAINNET; + let conf = conf.unwrap_or(&default_conf); + + let mainnet = conf.chain_id == CHAIN_ID_MAINNET; + let (chainstate, _) = + StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + + let conn = chainstate.nakamoto_blocks_db(); + + let query = match mode { + Some("prefix") => format!( + "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 AND index_block_hash LIKE \"{}%\"", + argv[3] + ), + Some("first") => format!( + "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {}", + argv[3] + ), + Some("range") => { + let arg4 = argv[3] + .parse::<u64>() + .expect("<start-block> not a valid u64"); + let arg5 = argv[4].parse::<u64>().expect("<end-block> not a valid u64"); + let start = arg4.saturating_sub(1); + let blocks = arg5.saturating_sub(arg4); + format!("SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY height ASC LIMIT {start}, {blocks}") + } + Some("index-range") => { + let start = argv[3] + .parse::<u64>() + .expect("<start-block> not a valid u64"); + let end = argv[4].parse::<u64>().expect("<end-block> not a valid u64"); + let blocks = end.saturating_sub(start); + format!("SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY index_block_hash ASC LIMIT {start}, {blocks}") + } + Some("last") => format!( + "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0 ORDER BY height DESC LIMIT {}", + argv[3] + ), + Some(_) => print_help_and_exit(), + // Default to ALL blocks + None => "SELECT index_block_hash FROM nakamoto_staging_blocks WHERE orphaned = 0".into(), + }; + + let mut stmt = conn.prepare(&query).unwrap(); + let mut hashes_set = stmt.query(NO_PARAMS).unwrap(); + + let mut index_block_hashes: Vec<String> = vec![]; + while let Ok(Some(row)) = hashes_set.next() { + index_block_hashes.push(row.get(0).unwrap()); + } + + let total = index_block_hashes.len(); + println!("Will check {total} blocks"); + for (i, index_block_hash) in index_block_hashes.iter().enumerate() { + if i % 100 == 0 { + println!("Checked {i}..."); + } + replay_naka_staging_block(db_path, index_block_hash, &conf); + } + println!("Finished.
run_time_seconds = {}", start.elapsed().as_secs()); +} + /// Replay mock mined blocks from JSON files /// Terminates on error using `process::exit()` /// @@ -525,11 +649,39 @@ fn replay_block( }; } +/// Fetch and process a NakamotoBlock from database and call `replay_block_nakamoto()` to validate +fn replay_naka_staging_block(db_path: &str, index_block_hash_hex: &str, conf: &StacksChainConfig) { + let block_id = StacksBlockId::from_hex(index_block_hash_hex).unwrap(); + let chain_state_path = format!("{db_path}/chainstate/"); + let sort_db_path = format!("{db_path}/burnchain/sortition"); + + let mainnet = conf.chain_id == CHAIN_ID_MAINNET; + let (mut chainstate, _) = + StacksChainState::open(mainnet, conf.chain_id, &chain_state_path, None).unwrap(); + + let mut sortdb = SortitionDB::connect( + &sort_db_path, + conf.first_block_height, + &conf.first_burn_header_hash, + conf.first_burn_header_timestamp, + &conf.epochs, + conf.pox_constants.clone(), + None, + true, + ) + .unwrap(); + + let (block, block_size) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&block_id) + .unwrap() + .unwrap(); + replay_block_nakamoto(&mut sortdb, &mut chainstate, &block, block_size).unwrap(); +} + fn replay_block_nakamoto( sort_db: &mut SortitionDB, stacks_chain_state: &mut StacksChainState, - mut chainstate_tx: ChainstateTx, - clarity_instance: &mut ClarityInstance, block: &NakamotoBlock, block_size: u64, ) -> Result<(), ChainstateError> { @@ -758,6 +910,7 @@ fn replay_block_nakamoto( commit_burn, sortition_burn, &active_reward_set, + true, ) { Ok(next_chain_tip_info) => (Some(next_chain_tip_info), None), Err(e) => (None, Some(e)), @@ -785,18 +938,5 @@ fn replay_block_nakamoto( return Err(e); }; - let (receipt, clarity_commit, reward_set_data) = ok_opt.expect("FATAL: unreachable"); - - assert_eq!( - receipt.header.anchored_header.block_hash(), - block.header.block_hash() - ); - assert_eq!(receipt.header.consensus_hash, block.header.consensus_hash); - - info!( - "Advanced to new tip! 
{}/{}", - &receipt.header.consensus_hash, - &receipt.header.anchored_header.block_hash() - ); Ok(()) } diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index bf2b5aff57..46ff54924b 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -537,10 +537,13 @@ pub struct MemPoolWalkSettings { pub txs_to_consider: HashSet, /// Origins for transactions that we'll consider pub filter_origins: HashSet, + /// What percentage of the remaining cost limit should we consume before stopping the walk + /// None means we consume the entire cost limit ASAP + pub tenure_cost_limit_per_block_percentage: Option, } -impl MemPoolWalkSettings { - pub fn default() -> MemPoolWalkSettings { +impl Default for MemPoolWalkSettings { + fn default() -> Self { MemPoolWalkSettings { max_walk_time_ms: u64::MAX, consider_no_estimate_tx_prob: 5, @@ -554,8 +557,11 @@ impl MemPoolWalkSettings { .into_iter() .collect(), filter_origins: HashSet::new(), + tenure_cost_limit_per_block_percentage: None, } } +} +impl MemPoolWalkSettings { pub fn zero() -> MemPoolWalkSettings { MemPoolWalkSettings { max_walk_time_ms: u64::MAX, @@ -570,6 +576,7 @@ impl MemPoolWalkSettings { .into_iter() .collect(), filter_origins: HashSet::new(), + tenure_cost_limit_per_block_percentage: None, } } } diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 491ba21ca0..bb850a784c 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -19,8 +19,8 @@ use std::collections::HashSet; use clarity::vm::costs::ExecutionCost; use lazy_static::lazy_static; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; -use stacks_common::types::StacksEpoch as GenericStacksEpoch; pub use stacks_common::types::StacksEpochId; +use stacks_common::types::{EpochList as GenericEpochList, StacksEpoch as GenericStacksEpoch}; use stacks_common::util::log; pub use self::mempool::MemPoolDB; @@ -35,6 +35,7 @@ pub mod tests; use std::cmp::Ordering; pub type StacksEpoch = GenericStacksEpoch; +pub type EpochList = GenericEpochList; // fork set identifier -- to be mixed with the consensus hash (encodes the version) pub const SYSTEM_FORK_SET_VERSION: [u8; 4] = [23u8, 0u8, 0u8, 0u8]; @@ -237,7 +238,7 @@ pub fn check_fault_injection(fault_name: &str) -> bool { } lazy_static! { - pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 9] = [ + pub static ref STACKS_EPOCHS_MAINNET: EpochList = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -301,11 +302,11 @@ lazy_static! { block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, - ]; + ]); } lazy_static! { - pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 9] = [ + pub static ref STACKS_EPOCHS_TESTNET: EpochList = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -369,11 +370,11 @@ lazy_static! { block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, - ]; + ]); } lazy_static! { - pub static ref STACKS_EPOCHS_REGTEST: [StacksEpoch; 9] = [ + pub static ref STACKS_EPOCHS_REGTEST: EpochList = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -437,7 +438,7 @@ lazy_static! { block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 }, - ]; + ]); } /// Stacks 2.05 epoch marker. 
All block-commits in 2.05 must have a memo bitfield with this value @@ -470,51 +471,183 @@ pub static STACKS_EPOCH_3_0_MARKER: u8 = 0x0b; #[test] fn test_ord_for_stacks_epoch() { - let epochs = STACKS_EPOCHS_MAINNET.clone(); - assert_eq!(epochs[0].cmp(&epochs[1]), Ordering::Less); - assert_eq!(epochs[1].cmp(&epochs[2]), Ordering::Less); - assert_eq!(epochs[0].cmp(&epochs[2]), Ordering::Less); - assert_eq!(epochs[0].cmp(&epochs[0]), Ordering::Equal); - assert_eq!(epochs[1].cmp(&epochs[1]), Ordering::Equal); - assert_eq!(epochs[2].cmp(&epochs[2]), Ordering::Equal); - assert_eq!(epochs[3].cmp(&epochs[3]), Ordering::Equal); - assert_eq!(epochs[4].cmp(&epochs[4]), Ordering::Equal); - assert_eq!(epochs[2].cmp(&epochs[0]), Ordering::Greater); - assert_eq!(epochs[2].cmp(&epochs[1]), Ordering::Greater); - assert_eq!(epochs[1].cmp(&epochs[0]), Ordering::Greater); - assert_eq!(epochs[3].cmp(&epochs[0]), Ordering::Greater); - assert_eq!(epochs[3].cmp(&epochs[1]), Ordering::Greater); - assert_eq!(epochs[3].cmp(&epochs[2]), Ordering::Greater); - assert_eq!(epochs[4].cmp(&epochs[0]), Ordering::Greater); - assert_eq!(epochs[4].cmp(&epochs[1]), Ordering::Greater); - assert_eq!(epochs[4].cmp(&epochs[2]), Ordering::Greater); - assert_eq!(epochs[4].cmp(&epochs[3]), Ordering::Greater); - assert_eq!(epochs[5].cmp(&epochs[0]), Ordering::Greater); - assert_eq!(epochs[5].cmp(&epochs[1]), Ordering::Greater); - assert_eq!(epochs[5].cmp(&epochs[2]), Ordering::Greater); - assert_eq!(epochs[5].cmp(&epochs[3]), Ordering::Greater); - assert_eq!(epochs[5].cmp(&epochs[4]), Ordering::Greater); - assert_eq!(epochs[6].cmp(&epochs[0]), Ordering::Greater); - assert_eq!(epochs[6].cmp(&epochs[1]), Ordering::Greater); - assert_eq!(epochs[6].cmp(&epochs[2]), Ordering::Greater); - assert_eq!(epochs[6].cmp(&epochs[3]), Ordering::Greater); - assert_eq!(epochs[6].cmp(&epochs[4]), Ordering::Greater); - assert_eq!(epochs[6].cmp(&epochs[5]), Ordering::Greater); - assert_eq!(epochs[7].cmp(&epochs[0]), Ordering::Greater); - assert_eq!(epochs[7].cmp(&epochs[1]), Ordering::Greater); - assert_eq!(epochs[7].cmp(&epochs[2]), Ordering::Greater); - assert_eq!(epochs[7].cmp(&epochs[3]), Ordering::Greater); - assert_eq!(epochs[7].cmp(&epochs[4]), Ordering::Greater); - assert_eq!(epochs[7].cmp(&epochs[5]), Ordering::Greater); - assert_eq!(epochs[7].cmp(&epochs[6]), Ordering::Greater); - assert_eq!(epochs[8].cmp(&epochs[0]), Ordering::Greater); - assert_eq!(epochs[8].cmp(&epochs[1]), Ordering::Greater); - assert_eq!(epochs[8].cmp(&epochs[2]), Ordering::Greater); - assert_eq!(epochs[8].cmp(&epochs[3]), Ordering::Greater); - assert_eq!(epochs[8].cmp(&epochs[4]), Ordering::Greater); - assert_eq!(epochs[8].cmp(&epochs[5]), Ordering::Greater); - assert_eq!(epochs[8].cmp(&epochs[6]), Ordering::Greater); - assert_eq!(epochs[8].cmp(&epochs[7]), Ordering::Greater); + let epochs = &*STACKS_EPOCHS_MAINNET; + assert_eq!( + epochs[StacksEpochId::Epoch10].cmp(&epochs[StacksEpochId::Epoch20]), + Ordering::Less + ); + assert_eq!( + epochs[StacksEpochId::Epoch20].cmp(&epochs[StacksEpochId::Epoch2_05]), + Ordering::Less + ); + assert_eq!( + epochs[StacksEpochId::Epoch10].cmp(&epochs[StacksEpochId::Epoch2_05]), + Ordering::Less + ); + assert_eq!( + epochs[StacksEpochId::Epoch10].cmp(&epochs[StacksEpochId::Epoch10]), + Ordering::Equal + ); + assert_eq!( + epochs[StacksEpochId::Epoch20].cmp(&epochs[StacksEpochId::Epoch20]), + Ordering::Equal + ); + assert_eq!( + epochs[StacksEpochId::Epoch2_05].cmp(&epochs[StacksEpochId::Epoch2_05]), + Ordering::Equal + ); + assert_eq!( + 
epochs[StacksEpochId::Epoch21].cmp(&epochs[StacksEpochId::Epoch21]), + Ordering::Equal + ); + assert_eq!( + epochs[StacksEpochId::Epoch22].cmp(&epochs[StacksEpochId::Epoch22]), + Ordering::Equal + ); + assert_eq!( + epochs[StacksEpochId::Epoch2_05].cmp(&epochs[StacksEpochId::Epoch10]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch2_05].cmp(&epochs[StacksEpochId::Epoch20]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch20].cmp(&epochs[StacksEpochId::Epoch10]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch21].cmp(&epochs[StacksEpochId::Epoch10]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch21].cmp(&epochs[StacksEpochId::Epoch20]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch21].cmp(&epochs[StacksEpochId::Epoch2_05]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch22].cmp(&epochs[StacksEpochId::Epoch10]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch22].cmp(&epochs[StacksEpochId::Epoch20]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch22].cmp(&epochs[StacksEpochId::Epoch2_05]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch22].cmp(&epochs[StacksEpochId::Epoch21]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch23].cmp(&epochs[StacksEpochId::Epoch10]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch23].cmp(&epochs[StacksEpochId::Epoch20]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch23].cmp(&epochs[StacksEpochId::Epoch2_05]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch23].cmp(&epochs[StacksEpochId::Epoch21]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch23].cmp(&epochs[StacksEpochId::Epoch22]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch24].cmp(&epochs[StacksEpochId::Epoch10]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch24].cmp(&epochs[StacksEpochId::Epoch20]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch24].cmp(&epochs[StacksEpochId::Epoch2_05]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch24].cmp(&epochs[StacksEpochId::Epoch21]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch24].cmp(&epochs[StacksEpochId::Epoch22]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch24].cmp(&epochs[StacksEpochId::Epoch23]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch25].cmp(&epochs[StacksEpochId::Epoch10]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch25].cmp(&epochs[StacksEpochId::Epoch20]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch25].cmp(&epochs[StacksEpochId::Epoch2_05]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch25].cmp(&epochs[StacksEpochId::Epoch21]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch25].cmp(&epochs[StacksEpochId::Epoch22]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch25].cmp(&epochs[StacksEpochId::Epoch23]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch25].cmp(&epochs[StacksEpochId::Epoch24]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch30].cmp(&epochs[StacksEpochId::Epoch10]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch30].cmp(&epochs[StacksEpochId::Epoch20]), + Ordering::Greater + ); + assert_eq!( + 
epochs[StacksEpochId::Epoch30].cmp(&epochs[StacksEpochId::Epoch2_05]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch30].cmp(&epochs[StacksEpochId::Epoch21]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch30].cmp(&epochs[StacksEpochId::Epoch22]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch30].cmp(&epochs[StacksEpochId::Epoch23]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch30].cmp(&epochs[StacksEpochId::Epoch24]), + Ordering::Greater + ); + assert_eq!( + epochs[StacksEpochId::Epoch30].cmp(&epochs[StacksEpochId::Epoch25]), + Ordering::Greater + ); } #[test] @@ -558,35 +691,35 @@ fn test_ord_for_stacks_epoch_id() { } pub trait StacksEpochExtension { #[cfg(test)] - fn unit_test(stacks_epoch_id: StacksEpochId, epoch_2_0_block_height: u64) -> Vec<StacksEpoch>; + fn unit_test(stacks_epoch_id: StacksEpochId, epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] - fn unit_test_2_05(epoch_2_0_block_height: u64) -> Vec<StacksEpoch>; + fn unit_test_2_05(epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] - fn unit_test_2_05_only(epoch_2_0_block_height: u64) -> Vec<StacksEpoch>; + fn unit_test_2_05_only(epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] - fn unit_test_pre_2_05(epoch_2_0_block_height: u64) -> Vec<StacksEpoch>; + fn unit_test_pre_2_05(epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] - fn unit_test_2_1(epoch_2_0_block_height: u64) -> Vec<StacksEpoch>; + fn unit_test_2_1(epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] - fn unit_test_2_2(epoch_2_0_block_height: u64) -> Vec<StacksEpoch>; + fn unit_test_2_2(epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] - fn unit_test_2_3(epoch_2_0_block_height: u64) -> Vec<StacksEpoch>; + fn unit_test_2_3(epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] - fn unit_test_2_4(epoch_2_0_block_height: u64) -> Vec<StacksEpoch>; + fn unit_test_2_4(epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] - fn unit_test_2_5(epoch_2_0_block_height: u64) -> Vec<StacksEpoch>; + fn unit_test_2_5(epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] - fn unit_test_3_0(epoch_2_0_block_height: u64) -> Vec<StacksEpoch>; + fn unit_test_3_0(epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] - fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> Vec<StacksEpoch>; + fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> EpochList; #[cfg(test)] - fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec<StacksEpoch>; + fn unit_test_3_0_only(first_burnchain_height: u64) -> EpochList; fn all( epoch_2_0_block_height: u64, epoch_2_05_block_height: u64, epoch_2_1_block_height: u64, - ) -> Vec<StacksEpoch>; - fn validate_epochs(epochs: &[StacksEpoch]) -> Vec<StacksEpoch>; + ) -> EpochList; + fn validate_epochs(epochs: &[StacksEpoch]) -> EpochList; /// This method gets the epoch vector.
/// /// Choose according to: @@ -597,15 +730,15 @@ pub trait StacksEpochExtension { /// fn get_epochs( bitcoin_network: BitcoinNetworkType, - configured_epochs: Option<&Vec>, - ) -> Vec; + configured_epochs: Option<&EpochList>, + ) -> EpochList; } impl StacksEpochExtension for StacksEpoch { fn get_epochs( bitcoin_network: BitcoinNetworkType, - configured_epochs: Option<&Vec>, - ) -> Vec { + configured_epochs: Option<&EpochList>, + ) -> EpochList { match configured_epochs { Some(epochs) => { assert!(bitcoin_network != BitcoinNetworkType::Mainnet); @@ -616,13 +749,13 @@ impl StacksEpochExtension for StacksEpoch { } #[cfg(test)] - fn unit_test_pre_2_05(first_burnchain_height: u64) -> Vec { + fn unit_test_pre_2_05(first_burnchain_height: u64) -> EpochList { info!( "StacksEpoch unit_test first_burn_height = {}", first_burnchain_height ); - vec![ + EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -637,17 +770,17 @@ impl StacksEpochExtension for StacksEpoch { block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_0, }, - ] + ]) } #[cfg(test)] - fn unit_test_2_05(first_burnchain_height: u64) -> Vec { + fn unit_test_2_05(first_burnchain_height: u64) -> EpochList { info!( "StacksEpoch unit_test first_burn_height = {}", first_burnchain_height ); - vec![ + EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -675,17 +808,17 @@ impl StacksEpochExtension for StacksEpoch { }, network_epoch: PEER_VERSION_EPOCH_2_05, }, - ] + ]) } #[cfg(test)] - fn unit_test_2_05_only(first_burnchain_height: u64) -> Vec { + fn unit_test_2_05_only(first_burnchain_height: u64) -> EpochList { info!( "StacksEpoch unit_test first_burn_height = {}", first_burnchain_height ); - vec![ + EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -713,17 +846,17 @@ impl StacksEpochExtension for StacksEpoch { }, network_epoch: PEER_VERSION_EPOCH_2_05, }, - ] + ]) } #[cfg(test)] - fn unit_test_2_1(first_burnchain_height: u64) -> Vec { + fn unit_test_2_1(first_burnchain_height: u64) -> EpochList { info!( "StacksEpoch unit_test first_burn_height = {}", first_burnchain_height ); - vec![ + EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -764,17 +897,17 @@ impl StacksEpochExtension for StacksEpoch { }, network_epoch: PEER_VERSION_EPOCH_2_1, }, - ] + ]) } #[cfg(test)] - fn unit_test_2_2(first_burnchain_height: u64) -> Vec { + fn unit_test_2_2(first_burnchain_height: u64) -> EpochList { info!( "StacksEpoch unit_test first_burn_height = {}", first_burnchain_height ); - vec![ + EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -828,17 +961,17 @@ impl StacksEpochExtension for StacksEpoch { }, network_epoch: PEER_VERSION_EPOCH_2_2, }, - ] + ]) } #[cfg(test)] - fn unit_test_2_3(first_burnchain_height: u64) -> Vec { + fn unit_test_2_3(first_burnchain_height: u64) -> EpochList { info!( "StacksEpoch unit_test_2_3 first_burn_height = {}", first_burnchain_height ); - vec![ + EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -905,17 +1038,17 @@ impl StacksEpochExtension for StacksEpoch { }, network_epoch: PEER_VERSION_EPOCH_2_3, }, - ] + ]) } #[cfg(test)] - fn unit_test_2_4(first_burnchain_height: u64) -> Vec { + fn unit_test_2_4(first_burnchain_height: u64) -> EpochList { info!( "StacksEpoch unit_test_2_4 first_burn_height = {}", first_burnchain_height ); - vec![ + EpochList::new(&[ StacksEpoch { epoch_id: 
StacksEpochId::Epoch10, start_height: 0, @@ -995,17 +1128,17 @@ impl StacksEpochExtension for StacksEpoch { }, network_epoch: PEER_VERSION_EPOCH_2_4, }, - ] + ]) } #[cfg(test)] - fn unit_test_2_5(first_burnchain_height: u64) -> Vec<StacksEpoch> { + fn unit_test_2_5(first_burnchain_height: u64) -> EpochList { info!( "StacksEpoch unit_test_2_5 first_burn_height = {}", first_burnchain_height ); - vec![ + EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -1098,17 +1231,17 @@ impl StacksEpochExtension for StacksEpoch { }, network_epoch: PEER_VERSION_EPOCH_2_5, }, - ] + ]) } #[cfg(test)] - fn unit_test_3_0(first_burnchain_height: u64) -> Vec<StacksEpoch> { + fn unit_test_3_0(first_burnchain_height: u64) -> EpochList { info!( "StacksEpoch unit_test_3_0 first_burn_height = {}", first_burnchain_height ); - vec![ + EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -1214,17 +1347,17 @@ impl StacksEpochExtension for StacksEpoch { }, network_epoch: PEER_VERSION_EPOCH_3_0, }, - ] + ]) } #[cfg(test)] - fn unit_test_2_1_only(first_burnchain_height: u64) -> Vec<StacksEpoch> { + fn unit_test_2_1_only(first_burnchain_height: u64) -> EpochList { info!( "StacksEpoch unit_test first_burn_height = {}", first_burnchain_height ); - vec![ + EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -1265,17 +1398,17 @@ impl StacksEpochExtension for StacksEpoch { }, network_epoch: PEER_VERSION_EPOCH_2_1, }, - ] + ]) } #[cfg(test)] - fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec<StacksEpoch> { + fn unit_test_3_0_only(first_burnchain_height: u64) -> EpochList { info!( "StacksEpoch unit_test first_burn_height = {}", first_burnchain_height ); - vec![ + EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -1339,11 +1472,11 @@ impl StacksEpochExtension for StacksEpoch { block_limit: BLOCK_LIMIT_MAINNET_21, network_epoch: PEER_VERSION_EPOCH_3_0, }, - ] + ]) } #[cfg(test)] - fn unit_test(stacks_epoch_id: StacksEpochId, first_burnchain_height: u64) -> Vec<StacksEpoch> { + fn unit_test(stacks_epoch_id: StacksEpochId, first_burnchain_height: u64) -> EpochList { match stacks_epoch_id { StacksEpochId::Epoch10 | StacksEpochId::Epoch20 => { StacksEpoch::unit_test_pre_2_05(first_burnchain_height) @@ -1362,8 +1495,8 @@ impl StacksEpochExtension for StacksEpoch { epoch_2_0_block_height: u64, epoch_2_05_block_height: u64, epoch_2_1_block_height: u64, - ) -> Vec<StacksEpoch> { - vec![ + ) -> EpochList { + EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -1392,13 +1525,13 @@ impl StacksEpochExtension for StacksEpoch { block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_1_0, }, - ] + ]) } /// Verify that a list of epochs is well-formed, and if so, return the list of epochs. /// Epochs must proceed in order, and must represent contiguous block ranges. /// Panic if the list is not well-formed. - fn validate_epochs(epochs_ref: &[StacksEpoch]) -> Vec<StacksEpoch> { + fn validate_epochs(epochs_ref: &[StacksEpoch]) -> EpochList { // sanity check -- epochs must all be contiguous, each epoch must be unique, // and the range of epochs should span the whole non-negative i64 space.
let mut epochs = epochs_ref.to_vec(); @@ -1449,6 +1582,6 @@ impl StacksEpochExtension for StacksEpoch { } assert_eq!(epoch_end_height, STACKS_EPOCH_MAX); - epochs + EpochList::new(&epochs) } } diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 98315cffa8..16fbd7c2d2 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -49,6 +49,10 @@ use blockstack_lib::chainstate::burn::db::sortdb::{ use blockstack_lib::chainstate::burn::operations::BlockstackOperationType; use blockstack_lib::chainstate::burn::{BlockSnapshot, ConsensusHash}; use blockstack_lib::chainstate::coordinator::{get_reward_cycle_info, OnChainRewardSetProvider}; +use blockstack_lib::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use blockstack_lib::chainstate::nakamoto::shadow::{ +process_shadow_block, shadow_chainstate_repair, +}; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use blockstack_lib::chainstate::stacks::db::blocks::{DummyEventDispatcher, StagingBlock}; use blockstack_lib::chainstate::stacks::db::{ @@ -247,6 +251,56 @@ impl P2PSession { } } +fn open_nakamoto_chainstate_dbs( + chainstate_dir: &str, + network: &str, +) -> (SortitionDB, StacksChainState) { + let (mainnet, chain_id, pox_constants, dirname) = match network { + "mainnet" => ( + true, + CHAIN_ID_MAINNET, + PoxConstants::mainnet_default(), + network, + ), + "krypton" => ( + false, + 0x80000100, + PoxConstants::nakamoto_testnet_default(), + network, + ), + "naka3" => ( + false, + 0x80000000, + PoxConstants::new(20, 5, 3, 100, 0, u64::MAX, u64::MAX, 104, 105, 106, 107), + "nakamoto-neon", + ), + _ => { + panic!("Unrecognized network name '{}'", network); + } + }; + + let chain_state_path = format!("{}/{}/chainstate/", chainstate_dir, dirname); + let sort_db_path = format!("{}/{}/burnchain/sortition/", chainstate_dir, dirname); + + let sort_db = SortitionDB::open(&sort_db_path, true, pox_constants) + .unwrap_or_else(|_| panic!("Failed to open {sort_db_path}")); + + let (chain_state, _) = StacksChainState::open(mainnet, chain_id, &chain_state_path, None) + .expect("Failed to open stacks chain state"); + + (sort_db, chain_state) +} + +fn check_shadow_network(network: &str) { + if network != "mainnet" && network != "krypton" && network != "naka3" { + eprintln!( + "Unknown network '{}': only support 'mainnet', 'krypton', or 'naka3'", + &network + ); + process::exit(1); + } +} + #[cfg_attr(test, mutants::skip)] fn main() { let mut argv: Vec<String> = env::args().collect(); @@ -1166,6 +1220,204 @@ simulating a miner.
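// A minimal sketch (not part of the diff) of the two EpochList accessors this
// change relies on: keyed indexing by StacksEpochId, as exercised by the
// epochs.rs tests above, and epoch_at_height(), as called from
// ConversationP2P::get_current_epoch in chat.rs further below. Import paths
// follow those call sites; everything else about EpochList's API is assumed.
use crate::core::EpochList;
use stacks_common::types::StacksEpochId;

fn epoch_list_usage(epochs: &EpochList, burn_height: u64) {
    // Keyed lookup by epoch id replaces positional Vec<StacksEpoch> indexing.
    let epoch_30 = &epochs[StacksEpochId::Epoch30];
    let _nakamoto_activation_height = epoch_30.start_height;

    // Height-based lookup replaces StacksEpoch::find_epoch() plus manual indexing.
    let current = epochs
        .epoch_at_height(burn_height)
        .unwrap_or_else(|| panic!("BUG: block {} is not in a known epoch", burn_height));

    // Epochs cover contiguous height ranges, so the hit must start at or below the query.
    assert!(current.start_height <= burn_height);
}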
println!("{:?}", inv); } + if argv[1] == "get-nakamoto-tip" { + if argv.len() < 4 { + eprintln!( + "Usage: {} get-nakamoto-tip CHAINSTATE_DIR NETWORK", + &argv[0] + ); + process::exit(1); + } + + let chainstate_dir = argv[2].as_str(); + let network = argv[3].as_str(); + + check_shadow_network(network); + let (sort_db, chain_state) = open_nakamoto_chainstate_dbs(chainstate_dir, network); + + let header = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) + .unwrap() + .unwrap(); + println!("{}", &header.index_block_hash()); + process::exit(0); + } + + if argv[1] == "get-account" { + if argv.len() < 5 { + eprintln!( + "Usage: {} get-account CHAINSTATE_DIR mainnet|krypton ADDRESS [CHAIN_TIP]", + &argv[0] + ); + process::exit(1); + } + + let chainstate_dir = argv[2].as_str(); + let network = argv[3].as_str(); + let addr = StacksAddress::from_string(&argv[4]).unwrap(); + let chain_tip: Option = + argv.get(5).map(|tip| StacksBlockId::from_hex(tip).unwrap()); + + check_shadow_network(network); + let (sort_db, mut chain_state) = open_nakamoto_chainstate_dbs(chainstate_dir, network); + + let chain_tip_header = chain_tip + .map(|tip| { + let header = NakamotoChainState::get_block_header_nakamoto(chain_state.db(), &tip) + .unwrap() + .unwrap(); + header + }) + .unwrap_or_else(|| { + let header = + NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) + .unwrap() + .unwrap(); + header + }); + + let account = + NakamotoBlockBuilder::get_account(&mut chain_state, &sort_db, &addr, &chain_tip_header) + .unwrap(); + println!("{:#?}", &account); + process::exit(0); + } + + if argv[1] == "make-shadow-block" { + if argv.len() < 5 { + eprintln!( + "Usage: {} make-shadow-block CHAINSTATE_DIR NETWORK CHAIN_TIP_HASH [TX...]", + &argv[0] + ); + process::exit(1); + } + let chainstate_dir = argv[2].as_str(); + let network = argv[3].as_str(); + let chain_tip = StacksBlockId::from_hex(argv[4].as_str()).unwrap(); + let txs = argv[5..] + .iter() + .map(|tx_str| { + let tx_bytes = hex_bytes(&tx_str).unwrap(); + let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + tx + }) + .collect(); + + check_shadow_network(network); + let (sort_db, mut chain_state) = open_nakamoto_chainstate_dbs(chainstate_dir, network); + let header = NakamotoChainState::get_block_header(chain_state.db(), &chain_tip) + .unwrap() + .unwrap(); + + let shadow_block = NakamotoBlockBuilder::make_shadow_tenure( + &mut chain_state, + &sort_db, + chain_tip, + header.consensus_hash, + txs, + ) + .unwrap(); + + println!("{}", to_hex(&shadow_block.serialize_to_vec())); + process::exit(0); + } + + // Generates the shadow blocks needed to restore this node to working order. + // Automatically inserts and processes them as well. 
+ // Prints out the generated shadow blocks (as JSON) + if argv[1] == "shadow-chainstate-repair" { + if argv.len() < 4 { + eprintln!( + "Usage: {} shadow-chainstate-repair CHAINSTATE_DIR NETWORK", + &argv[0] + ); + process::exit(1); + } + + let chainstate_dir = argv[2].as_str(); + let network = argv[3].as_str(); + + check_shadow_network(network); + + let (mut sort_db, mut chain_state) = open_nakamoto_chainstate_dbs(chainstate_dir, network); + let shadow_blocks = shadow_chainstate_repair(&mut chain_state, &mut sort_db).unwrap(); + + let shadow_blocks_hex: Vec<_> = shadow_blocks + .into_iter() + .map(|blk| to_hex(&blk.serialize_to_vec())) + .collect(); + + println!("{}", serde_json::to_string(&shadow_blocks_hex).unwrap()); + process::exit(0); + } + + // Inserts and processes shadow blocks generated from `shadow-chainstate-repair` + if argv[1] == "shadow-chainstate-patch" { + if argv.len() < 5 { + eprintln!( + "Usage: {} shadow-chainstate-patch CHAINSTATE_DIR NETWORK SHADOW_BLOCKS_PATH.JSON", + &argv[0] + ); + process::exit(1); + } + + let chainstate_dir = argv[2].as_str(); + let network = argv[3].as_str(); + let shadow_blocks_json_path = argv[4].as_str(); + + let shadow_blocks_hex = { + let mut blocks_json_file = + File::open(shadow_blocks_json_path).expect("Unable to open file"); + let mut buffer = vec![]; + blocks_json_file.read_to_end(&mut buffer).unwrap(); + let shadow_blocks_hex: Vec<String> = serde_json::from_slice(&buffer).unwrap(); + shadow_blocks_hex + }; + + let shadow_blocks: Vec<_> = shadow_blocks_hex + .into_iter() + .map(|blk_hex| { + NakamotoBlock::consensus_deserialize(&mut hex_bytes(&blk_hex).unwrap().as_slice()) + .unwrap() + }) + .collect(); + + check_shadow_network(network); + + let (mut sort_db, mut chain_state) = open_nakamoto_chainstate_dbs(chainstate_dir, network); + for shadow_block in shadow_blocks.into_iter() { + process_shadow_block(&mut chain_state, &mut sort_db, shadow_block).unwrap(); + } + + process::exit(0); + } + + if argv[1] == "add-shadow-block" { + if argv.len() < 5 { + eprintln!( + "Usage: {} add-shadow-block CHAINSTATE_DIR NETWORK SHADOW_BLOCK_HEX", + &argv[0] + ); + process::exit(1); + } + let chainstate_dir = argv[2].as_str(); + let network = argv[3].as_str(); + let block_hex = argv[4].as_str(); + let shadow_block = + NakamotoBlock::consensus_deserialize(&mut hex_bytes(block_hex).unwrap().as_slice()) + .unwrap(); + + assert!(shadow_block.is_shadow_block()); + + check_shadow_network(network); + let (_, mut chain_state) = open_nakamoto_chainstate_dbs(chainstate_dir, network); + + let tx = chain_state.staging_db_tx_begin().unwrap(); + tx.add_shadow_block(&shadow_block).unwrap(); + tx.commit().unwrap(); + + process::exit(0); + } + if argv[1] == "replay-chainstate" { if argv.len() < 7 { eprintln!("Usage: {} OLD_CHAINSTATE_PATH OLD_SORTITION_DB_PATH OLD_BURNCHAIN_DB_PATH NEW_CHAINSTATE_PATH NEW_BURNCHAIN_DB_PATH", &argv[0]); @@ -1470,6 +1722,35 @@ simulating a miner.
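// A small sketch (helper names illustrative, import paths mirroring the ones
// used in main.rs above) of the interchange format shared by the
// `shadow-chainstate-repair` and `shadow-chainstate-patch` commands: a JSON
// array of hex-encoded, consensus-serialized NakamotoBlocks.
use blockstack_lib::chainstate::nakamoto::NakamotoBlock;
use stacks_common::codec::StacksMessageCodec;
use stacks_common::util::hash::{hex_bytes, to_hex};

fn encode_shadow_blocks(blocks: &[NakamotoBlock]) -> String {
    // Same encoding that `shadow-chainstate-repair` prints to stdout.
    let blocks_hex: Vec<String> = blocks
        .iter()
        .map(|blk| to_hex(&blk.serialize_to_vec()))
        .collect();
    serde_json::to_string(&blocks_hex).expect("JSON encoding of strings cannot fail")
}

fn decode_shadow_blocks(json: &[u8]) -> Vec<NakamotoBlock> {
    // Same decoding that `shadow-chainstate-patch` applies to its input file.
    let blocks_hex: Vec<String> = serde_json::from_slice(json).expect("invalid shadow block JSON");
    blocks_hex
        .iter()
        .map(|blk_hex| {
            NakamotoBlock::consensus_deserialize(&mut hex_bytes(blk_hex).unwrap().as_slice())
                .expect("invalid shadow block bytes")
        })
        .collect()
}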
process::exit(0); } + if argv[1] == "replay-naka-block" { + let chain_config = + if let Some(network_flag_ix) = argv.iter().position(|arg| arg == "--network") { + let Some(network_choice) = argv.get(network_flag_ix + 1) else { + eprintln!("Must supply network choice after `--network` option"); + process::exit(1); + }; + + let network_config = match network_choice.to_lowercase().as_str() { + "testnet" => cli::StacksChainConfig::default_testnet(), + "mainnet" => cli::StacksChainConfig::default_mainnet(), + other => { + eprintln!("Unknown network choice `{other}`"); + process::exit(1); + } + }; + + argv.remove(network_flag_ix + 1); + argv.remove(network_flag_ix); + + Some(network_config) + } else { + None + }; + + cli::command_replay_block_nakamoto(&argv[1..], chain_config.as_ref()); + process::exit(0); + } + if argv[1] == "replay-mock-mining" { cli::command_replay_mock_mining(&argv[1..], None); process::exit(0); diff --git a/stackslib/src/net/api/get_tenures_fork_info.rs b/stackslib/src/net/api/get_tenures_fork_info.rs index 8bcf32ce1d..2cb2847290 100644 --- a/stackslib/src/net/api/get_tenures_fork_info.rs +++ b/stackslib/src/net/api/get_tenures_fork_info.rs @@ -231,21 +231,31 @@ impl RPCRequestHandler for GetTenuresForkInfo { chainstate, &network.stacks_tip.block_id(), )?); - let handle = sortdb.index_handle(&cursor.sortition_id); let mut depth = 0; while depth < DEPTH_LIMIT && cursor.consensus_hash != recurse_end { - depth += 1; if height_bound >= cursor.block_height { return Err(ChainError::NotInSameFork); } - cursor = handle - .get_last_snapshot_with_sortition(cursor.block_height.saturating_sub(1))?; - results.push(TenureForkingInfo::from_snapshot( - &cursor, - sortdb, - chainstate, - &network.stacks_tip.block_id(), - )?); + cursor = + SortitionDB::get_block_snapshot(sortdb.conn(), &cursor.parent_sortition_id)? + .ok_or_else(|| ChainError::NoSuchBlockError)?; + if cursor.sortition + || chainstate + .nakamoto_blocks_db() + .is_shadow_tenure(&cursor.consensus_hash)? + { + results.push(TenureForkingInfo::from_snapshot( + &cursor, + sortdb, + chainstate, + &network.stacks_tip.block_id(), + )?); + } + if cursor.sortition { + // don't count shadow blocks towards the depth, since there can be a large + // swath of them. 
+ depth += 1; + } } Ok(results) diff --git a/stackslib/src/net/api/getattachmentsinv.rs b/stackslib/src/net/api/getattachmentsinv.rs index 2ea73baf04..b7fe94baf1 100644 --- a/stackslib/src/net/api/getattachmentsinv.rs +++ b/stackslib/src/net/api/getattachmentsinv.rs @@ -96,11 +96,10 @@ impl HttpRequest for RPCGetAttachmentsInvRequestHandler { if key == "index_block_hash" { index_block_hash = StacksBlockId::from_hex(&value).ok(); } else if key == "pages_indexes" { - if let Ok(pages_indexes_value) = value.parse::<String>() { - for entry in pages_indexes_value.split(',') { - if let Ok(page_index) = entry.parse::<u32>() { - page_indexes.insert(page_index); - } + let pages_indexes_value = value.to_string(); + for entry in pages_indexes_value.split(',') { + if let Ok(page_index) = entry.parse::<u32>() { + page_indexes.insert(page_index); } } } diff --git a/stackslib/src/net/api/getblockbyheight.rs b/stackslib/src/net/api/getblockbyheight.rs new file mode 100644 index 0000000000..9a17589c5d --- /dev/null +++ b/stackslib/src/net/api/getblockbyheight.rs @@ -0,0 +1,231 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
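// A self-contained model (stand-in types, not the real BlockSnapshot /
// SortitionDB handles) of the traversal change in get_tenures_fork_info.rs
// above: walk parent_sortition_id links, report snapshots that either won
// sortition or host a shadow tenure, and let only real sortitions count
// toward the depth limit, since a repair can insert a long run of shadow
// tenures.
struct SnapshotModel {
    sortition: bool,       // burn block had a winning miner
    shadow_tenure: bool,   // consensus hash hosts a shadow tenure
    parent: Option<usize>, // index of the parent snapshot
}

fn collect_tenure_fork_info(chain: &[SnapshotModel], start: usize, depth_limit: usize) -> Vec<usize> {
    let mut results = vec![start];
    let mut cursor = start;
    let mut depth = 0;
    while depth < depth_limit {
        let Some(parent) = chain[cursor].parent else {
            break;
        };
        cursor = parent;
        if chain[cursor].sortition || chain[cursor].shadow_tenure {
            results.push(cursor);
        }
        if chain[cursor].sortition {
            // shadow tenures do not advance the depth counter
            depth += 1;
        }
    }
    results
}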
+ +use std::io::{Read, Seek, SeekFrom, Write}; +use std::{fs, io}; + +use regex::{Captures, Regex}; +use rusqlite::Connection; +use serde::de::Error as de_Error; +use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; +use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; +use stacks_common::types::net::PeerHost; +use {serde, serde_json}; + +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn, StacksDBIndexed, +}; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::Error as ChainError; +use crate::net::api::getblock_v3::{NakamotoBlockStream, RPCNakamotoBlockRequestHandler}; +use crate::net::http::{ + parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, + HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, + HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, +}; +use crate::net::httpcore::{ + HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, + StacksHttpResponse, +}; +use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; +use crate::util_lib::db::{DBConn, Error as DBError}; + +#[derive(Clone)] +pub struct RPCNakamotoBlockByHeightRequestHandler { + pub block_height: Option<u64>, +} + +impl RPCNakamotoBlockByHeightRequestHandler { + pub fn new() -> Self { + Self { block_height: None } + } +} + +/// Decode the HTTP request +impl HttpRequest for RPCNakamotoBlockByHeightRequestHandler { + fn verb(&self) -> &'static str { + "GET" + } + + fn path_regex(&self) -> Regex { + Regex::new(r#"^/v3/blocks/height/(?P<block_height>[0-9]{1,20})$"#).unwrap() + } + + fn metrics_identifier(&self) -> &str { + "/v3/blocks/height/:block_height" + } + + /// Try to decode this request. + /// There's nothing to load here, so just make sure the request is well-formed. + fn try_parse_request( + &mut self, + preamble: &HttpRequestPreamble, + captures: &Captures, + query: Option<&str>, + _body: &[u8], + ) -> Result<HttpRequestContents, Error> { + if preamble.get_content_length() != 0 { + return Err(Error::DecodeError( + "Invalid Http request: expected 0-length body".to_string(), + )); + } + + let block_height_str = captures + .name("block_height") + .ok_or_else(|| { + Error::DecodeError("Failed to match path to block height group".to_string()) + })?
+ .as_str(); + + let block_height = block_height_str.parse::<u64>().map_err(|_| { + Error::DecodeError("Invalid path: unparseable block height".to_string()) + })?; + self.block_height = Some(block_height); + + Ok(HttpRequestContents::new().query_string(query)) + } +} + +impl RPCRequestHandler for RPCNakamotoBlockByHeightRequestHandler { + /// Reset internal state + fn restart(&mut self) { + self.block_height = None; + } + + /// Make the response + fn try_handle_request( + &mut self, + preamble: HttpRequestPreamble, + contents: HttpRequestContents, + node: &mut StacksNodeState, + ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { + let block_height = self + .block_height + .take() + .ok_or(NetError::SendError("Missing `block_height`".into()))?; + + let tip = match node.load_stacks_chain_tip(&preamble, &contents) { + Ok(tip) => tip, + Err(error_resp) => { + return error_resp.try_into_contents().map_err(NetError::from); + } + }; + + let index_block_hash_res = + node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { + chainstate + .index_conn() + .get_ancestor_block_hash(block_height, &tip) + }); + + let block_id = match index_block_hash_res { + Ok(index_block_hash_opt) => match index_block_hash_opt { + Some(index_block_hash) => index_block_hash, + None => { + // block hash not found + let msg = format!("No such block #{:?}\n", block_height); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpNotFound::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }, + Err(e) => { + // error querying the db + let msg = format!("Failed to load block #{}: {:?}\n", block_height, &e); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let stream_res = + node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { + let Some((tenure_id, parent_block_id)) = chainstate + .nakamoto_blocks_db() + .get_tenure_and_parent_block_id(&block_id)? + else { + return Err(ChainError::NoSuchBlockError); + }; + NakamotoBlockStream::new(chainstate, block_id, tenure_id, parent_block_id) + }); + + // start loading up the block + let stream = match stream_res { + Ok(stream) => stream, + Err(ChainError::NoSuchBlockError) => { + return StacksHttpResponse::new_error( + &preamble, + &HttpNotFound::new(format!("No such block #{:?}\n", &block_height)), + ) + .try_into_contents() + .map_err(NetError::from) + } + Err(e) => { + // nope -- error trying to check + let msg = format!("Failed to load block #{}: {:?}\n", block_height, &e); + warn!("{}", &msg); + return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) + .try_into_contents() + .map_err(NetError::from); + } + }; + + let resp_preamble = HttpResponsePreamble::from_http_request_preamble( + &preamble, + 200, + "OK", + None, + HttpContentType::Bytes, + ); + + Ok(( + resp_preamble, + HttpResponseContents::from_stream(Box::new(stream)), + )) + } +} + +/// Decode the HTTP response +impl HttpResponse for RPCNakamotoBlockByHeightRequestHandler { + /// Decode this response from a byte stream.
This is called by the client to decode this + /// message + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result<HttpResponsePayload, Error> { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } +} + +impl StacksHttpRequest { + pub fn new_get_nakamoto_block_by_height( + host: PeerHost, + block_height: u64, + tip: TipRequest, + ) -> StacksHttpRequest { + StacksHttpRequest::new_for_peer( + host, + "GET".into(), + format!("/v3/blocks/height/{}", block_height), + HttpRequestContents::new().for_tip(tip), + ) + .expect("FATAL: failed to construct request from infallible data") + } +} diff --git a/stackslib/src/net/api/getsortition.rs b/stackslib/src/net/api/getsortition.rs index 9b22d8b82f..b41e516cbf 100644 --- a/stackslib/src/net/api/getsortition.rs +++ b/stackslib/src/net/api/getsortition.rs @@ -29,7 +29,9 @@ use {serde, serde_json}; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn, StacksDBIndexed, +}; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as ChainError; use crate::net::api::getblock_v3::NakamotoBlockStream; @@ -85,6 +87,11 @@ pub struct SortitionInfo { pub consensus_hash: ConsensusHash, /// Boolean indicating whether or not there was a successful sortition (i.e. a winning /// block or miner was chosen). + /// + /// This will *also* be true if this sortition corresponds to a shadow block. This is because + /// the signer does not distinguish between shadow blocks and blocks with sortitions, so until + /// we can update the signer and this interface, we'll have to report the presence of a shadow + /// block tenure in a way that the signer currently understands. pub was_sortition: bool, /// If sortition occurred, and the miner's VRF key registration /// associated a nakamoto mining pubkey with their commit, this @@ -150,13 +157,41 @@ impl GetSortitionHandler { fn get_sortition_info( sortition_sn: BlockSnapshot, sortdb: &SortitionDB, + chainstate: &mut StacksChainState, + tip: &StacksBlockId, ) -> Result<SortitionInfo, ChainError> { + let is_shadow = chainstate + .nakamoto_blocks_db() + .is_shadow_tenure(&sortition_sn.consensus_hash)?; let (miner_pk_hash160, stacks_parent_ch, committed_block_hash, last_sortition_ch) = - if !sortition_sn.sortition { + if !sortition_sn.sortition && !is_shadow { let handle = sortdb.index_handle(&sortition_sn.sortition_id); let last_sortition = handle.get_last_snapshot_with_sortition(sortition_sn.block_height)?; (None, None, None, Some(last_sortition.consensus_hash)) + } else if !sortition_sn.sortition && is_shadow { + // this is a shadow tenure. + let parent_tenure_ch = chainstate + .index_conn() + .get_parent_tenure_consensus_hash(tip, &sortition_sn.consensus_hash)? + .ok_or_else(|| DBError::NotFoundError)?; + + let parent_tenure_start_header = + NakamotoChainState::get_nakamoto_tenure_start_block_header( + &mut chainstate.index_conn(), + tip, + &parent_tenure_ch, + )? + .ok_or_else(|| DBError::NotFoundError)?; + + ( + Some(Hash160([0x00; 20])), + Some(parent_tenure_ch.clone()), + Some(BlockHeaderHash( + parent_tenure_start_header.index_block_hash().0, + )), + Some(parent_tenure_ch), + ) } else { let block_commit = SortitionDB::get_block_commit(sortdb.conn(), &sortition_sn.winning_block_txid, &sortition_sn.sortition_id)?
.ok_or_else(|| { @@ -211,7 +246,7 @@ impl GetSortitionHandler { sortition_id: sortition_sn.sortition_id, parent_sortition_id: sortition_sn.parent_sortition_id, consensus_hash: sortition_sn.consensus_hash, - was_sortition: sortition_sn.sortition, + was_sortition: sortition_sn.sortition || is_shadow, miner_pk_hash160, stacks_parent_ch, last_sortition_ch, @@ -277,7 +312,7 @@ impl RPCRequestHandler for GetSortitionHandler { _contents: HttpRequestContents, node: &mut StacksNodeState, ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - let result = node.with_node_state(|network, sortdb, _chainstate, _mempool, _rpc_args| { + let result = node.with_node_state(|network, sortdb, chainstate, _mempool, _rpc_args| { let query_result = match self.query { QuerySpecifier::Latest => Ok(Some(network.burnchain_tip.clone())), QuerySpecifier::ConsensusHash(ref consensus_hash) => { @@ -306,7 +341,12 @@ impl RPCRequestHandler for GetSortitionHandler { } }; let sortition_sn = query_result?.ok_or_else(|| ChainError::NoSuchBlockError)?; - Self::get_sortition_info(sortition_sn, sortdb) + Self::get_sortition_info( + sortition_sn, + sortdb, + chainstate, + &network.stacks_tip.block_id(), + ) }); let block = match result { @@ -334,13 +374,18 @@ impl RPCRequestHandler for GetSortitionHandler { if self.query == QuerySpecifier::LatestAndLast { // if latest **and** last are requested, lookup the sortition info for last_sortition_ch if let Some(last_sortition_ch) = last_sortition_ch { - let result = node.with_node_state(|_, sortdb, _, _, _| { + let result = node.with_node_state(|network, sortdb, chainstate, _, _| { let last_sortition_sn = SortitionDB::get_block_snapshot_consensus( sortdb.conn(), &last_sortition_ch, )? .ok_or_else(|| ChainError::NoSuchBlockError)?; - Self::get_sortition_info(last_sortition_sn, sortdb) + Self::get_sortition_info( + last_sortition_sn, + sortdb, + chainstate, + &network.stacks_tip.block_id(), + ) }); let last_block = match result { Ok(block) => block, diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index 72aa417204..8fc8ee33ba 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -42,6 +42,7 @@ pub mod getattachment; pub mod getattachmentsinv; pub mod getblock; pub mod getblock_v3; +pub mod getblockbyheight; pub mod getconstantval; pub mod getcontractabi; pub mod getcontractsrc; @@ -92,6 +93,7 @@ impl StacksHttp { self.register_rpc_endpoint(getattachmentsinv::RPCGetAttachmentsInvRequestHandler::new()); self.register_rpc_endpoint(getblock::RPCBlocksRequestHandler::new()); self.register_rpc_endpoint(getblock_v3::RPCNakamotoBlockRequestHandler::new()); + self.register_rpc_endpoint(getblockbyheight::RPCNakamotoBlockByHeightRequestHandler::new()); self.register_rpc_endpoint(getconstantval::RPCGetConstantValRequestHandler::new()); self.register_rpc_endpoint(getcontractabi::RPCGetContractAbiRequestHandler::new()); self.register_rpc_endpoint(getcontractsrc::RPCGetContractSrcRequestHandler::new()); diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 0d1cf2ebf9..2d93c4a789 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -343,6 +343,17 @@ impl NakamotoBlockProposal { sortdb: &SortitionDB, chainstate: &mut StacksChainState, // not directly used; used as a handle to open other chainstates ) -> Result { + #[cfg(any(test, feature = "testing"))] + { + if *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { + // Do an extra 
check just so we don't log EVERY time. + warn!("Block validation is stalled due to testing directive."); + while *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Block validation is no longer stalled due to testing directive."); + } + } let ts_start = get_epoch_time_ms(); // Measure time from start of function let time_elapsed = || get_epoch_time_ms().saturating_sub(ts_start); @@ -413,7 +424,8 @@ impl NakamotoBlockProposal { }; // Static validation checks - NakamotoChainState::validate_nakamoto_block_burnchain( + NakamotoChainState::validate_normal_nakamoto_block_burnchain( + chainstate.nakamoto_blocks_db(), + &db_handle, expected_burn_opt, &self.block, @@ -477,6 +489,7 @@ impl NakamotoBlockProposal { tenure_change, coinbase, self.block.header.pox_treatment.len(), + None, )?; let mut miner_tenure_info = @@ -546,24 +559,6 @@ impl NakamotoBlockProposal { }); } - #[cfg(any(test, feature = "testing"))] - { - if *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { - // Do an extra check just so we don't log EVERY time. - warn!("Block validation is stalled due to testing directive."; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - ); - while *TEST_VALIDATE_STALL.lock().unwrap() == Some(true) { - std::thread::sleep(std::time::Duration::from_millis(10)); - } - info!("Block validation is no longer stalled due to testing directive."; - "block_id" => %block.block_id(), - "height" => block.header.chain_length, - ); - } - } - info!( "Participant: validated anchored block"; "block_header_hash" => %computed_block_header_hash, @@ -666,6 +661,12 @@ impl HttpRequest for RPCBlockProposalRequestHandler { } }; + if block_proposal.block.is_shadow_block() { + return Err(Error::DecodeError( + "Shadow blocks cannot be submitted for validation".to_string(), + )); + } + self.block_proposal = Some(block_proposal); Ok(HttpRequestContents::new().query_string(query)) } diff --git a/stackslib/src/net/api/tests/getblockbyheight.rs b/stackslib/src/net/api/tests/getblockbyheight.rs new file mode 100644 index 0000000000..0f608c4e13 --- /dev/null +++ b/stackslib/src/net/api/tests/getblockbyheight.rs @@ -0,0 +1,192 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>.
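// A hedged sketch of how a test drives the stall directive that now runs at
// the top of block validation (see the postblock_proposal.rs hunk above).
// It assumes TEST_VALIDATE_STALL is the publicly visible Mutex<Option<bool>>
// referenced in those checks; the function name and module path here are
// illustrative only.
fn stall_block_validation_example() {
    use crate::net::api::postblock_proposal::TEST_VALIDATE_STALL;

    // Engage the stall: subsequent validation calls park in the sleep loop
    // before doing any work.
    *TEST_VALIDATE_STALL.lock().unwrap() = Some(true);

    // ... submit a proposal, fork the chain, etc., while validation waits ...

    // Release the stall; parked validations resume within ~10ms.
    *TEST_VALIDATE_STALL.lock().unwrap() = Some(false);
}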
+ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; +use clarity::vm::{ClarityName, ContractName}; +use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::{ + BlockHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, +}; +use stacks_common::types::net::PeerHost; +use stacks_common::types::Address; + +use super::TestRPC; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use crate::chainstate::stacks::db::blocks::test::*; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::{ + Error as chainstate_error, StacksBlock, StacksBlockHeader, StacksMicroblock, +}; +use crate::net::api::getblock_v3::NakamotoBlockStream; +use crate::net::api::*; +use crate::net::connection::ConnectionOptions; +use crate::net::http::HttpChunkGenerator; +use crate::net::httpcore::{ + HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, + StacksHttpRequest, +}; +use crate::net::test::TestEventObserver; +use crate::net::tests::inv::nakamoto::make_nakamoto_peer_from_invs; +use crate::net::{ProtocolFamily, TipRequest}; +use crate::util_lib::db::DBConn; + +#[test] +fn test_try_parse_request() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); + + // NOTE: MARF enforces the height to be a u32 value + let request = StacksHttpRequest::new_get_nakamoto_block_by_height( + addr.into(), + 0xfffffffe, + TipRequest::UseLatestAnchoredTip, + ); + let bytes = request.try_serialize().unwrap(); + + debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); + + let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); + let mut handler = getblockbyheight::RPCNakamotoBlockByHeightRequestHandler::new(); + let mut parsed_request = http + .handle_try_parse_request( + &mut handler, + &parsed_preamble.expect_request(), + &bytes[offset..], + ) + .unwrap(); + + // parsed request consumes headers that would not be in a constructed request + parsed_request.clear_headers(); + let (preamble, contents) = parsed_request.destruct(); + + // consumed path args + assert_eq!(handler.block_height, Some(0xfffffffe)); + + assert_eq!(&preamble, request.preamble()); + + handler.restart(); + assert!(handler.block_height.is_none()); +} + +#[test] +fn test_try_make_response() { + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); + + let test_observer = TestEventObserver::new(); + let rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); + + let nakamoto_chain_tip_height = rpc_test.tip_height.clone(); + let canonical_tip = rpc_test.canonical_tip.clone(); + let consensus_hash = rpc_test.consensus_hash.clone(); + + let mut requests = vec![]; + + // query existing block (empty tip) + let request = StacksHttpRequest::new_get_nakamoto_block_by_height( + addr.into(), + nakamoto_chain_tip_height, + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query non-existent block (with biggest positive u32 value - 1 as MARF enforces it) + let request = StacksHttpRequest::new_get_nakamoto_block_by_height( + addr.into(),
+ 0xfffffffe, + TipRequest::UseLatestAnchoredTip, + ); + requests.push(request); + + // query existing block using the canonical_tip + let request = StacksHttpRequest::new_get_nakamoto_block_by_height( + addr.into(), + nakamoto_chain_tip_height, + TipRequest::SpecificTip(rpc_test.canonical_tip), + ); + requests.push(request); + + // query existing block using the unconfirmed tip + let request = StacksHttpRequest::new_get_nakamoto_block_by_height( + addr.into(), + nakamoto_chain_tip_height, + TipRequest::UseLatestUnconfirmedTip, + ); + requests.push(request); + + // dummy hack for generating an invalid tip + let mut dummy_tip = rpc_test.canonical_tip.clone(); + dummy_tip.0[0] = dummy_tip.0[0].wrapping_add(1); + + let request = StacksHttpRequest::new_get_nakamoto_block_by_height( + addr.into(), + nakamoto_chain_tip_height, + TipRequest::SpecificTip(dummy_tip), + ); + requests.push(request); + + let mut responses = rpc_test.run(requests); + + // got the block + let response = responses.remove(0); + let resp = response.decode_nakamoto_block().unwrap(); + + assert_eq!(resp.header.consensus_hash, consensus_hash); + assert_eq!(resp.header.block_id(), canonical_tip); + + // no block + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); + + // got the block from the tip + let response = responses.remove(0); + let resp = response.decode_nakamoto_block().unwrap(); + + assert_eq!(resp.header.consensus_hash, consensus_hash); + assert_eq!(resp.header.block_id(), canonical_tip); + + // got the block from the tip (unconfirmed) + let response = responses.remove(0); + let resp = response.decode_nakamoto_block().unwrap(); + + assert_eq!(resp.header.consensus_hash, consensus_hash); + assert_eq!(resp.header.block_id(), canonical_tip); + + // no block for dummy tip + let response = responses.remove(0); + let (preamble, body) = response.destruct(); + + assert_eq!(preamble.status_code, 404); +} diff --git a/stackslib/src/net/api/tests/getsigner.rs b/stackslib/src/net/api/tests/getsigner.rs index ffaa486f27..a3b112d0e3 100644 --- a/stackslib/src/net/api/tests/getsigner.rs +++ b/stackslib/src/net/api/tests/getsigner.rs @@ -139,7 +139,7 @@ fn test_try_make_response() { let response = responses.remove(0); info!("response: {:?}", &response); let signer_response = response.decode_signer().unwrap(); - assert_eq!(signer_response.blocks_signed, 40); + assert_eq!(signer_response.blocks_signed, 20); // Signer doesn't exist so it should not have signed anything let response = responses.remove(0); diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index d19854bf02..0a6ad69762 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -47,7 +47,7 @@ use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; use crate::net::relay::Relayer; use crate::net::rpc::ConversationHttp; use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; -use crate::net::tests::inv::nakamoto::make_nakamoto_peers_from_invs; +use crate::net::tests::inv::nakamoto::make_nakamoto_peers_from_invs_ext; use crate::net::{ Attachment, AttachmentInstance, MemPoolEventDispatcher, RPCHandlerArgs,
StackerDBConfig, StacksNodeState, UrlString, @@ -60,6 +60,7 @@ mod getattachment; mod getattachmentsinv; mod getblock; mod getblock_v3; +mod getblockbyheight; mod getconstantval; mod getcontractabi; mod getcontractsrc; @@ -199,6 +200,10 @@ pub struct TestRPC<'a> { pub convo_2: ConversationHttp, /// hash of the chain tip pub canonical_tip: StacksBlockId, + /// block header hash of the chain tip + pub tip_hash: BlockHeaderHash, + /// block height of the chain tip + pub tip_height: u64, /// consensus hash of the chain tip pub consensus_hash: ConsensusHash, /// hash of last microblock @@ -515,6 +520,7 @@ impl<'a> TestRPC<'a> { let microblock_txids = microblock.txs.iter().map(|tx| tx.txid()).collect(); let canonical_tip = StacksBlockHeader::make_index_block_hash(&consensus_hash, &stacks_block.block_hash()); + let tip_hash = stacks_block.block_hash(); if process_microblock { // store microblock stream @@ -812,6 +818,8 @@ impl<'a> TestRPC<'a> { 32, ); + let tip_height: u64 = 1; + TestRPC { privk1, privk2, @@ -822,6 +830,8 @@ impl<'a> TestRPC<'a> { convo_1, convo_2, canonical_tip, + tip_hash, + tip_height, consensus_hash, microblock_tip_hash: microblock.block_hash(), mempool_txids, @@ -839,8 +849,18 @@ impl<'a> TestRPC<'a> { true, true, true, true, true, true, true, true, true, true, ]]; - let (mut peer, mut other_peers) = - make_nakamoto_peers_from_invs(function_name!(), observer, 10, 3, bitvecs.clone(), 1); + let (mut peer, mut other_peers) = make_nakamoto_peers_from_invs_ext( + function_name!(), + observer, + bitvecs.clone(), + |boot_plan| { + boot_plan + .with_pox_constants(10, 3) + .with_extra_peers(1) + .with_initial_balances(vec![]) + .with_malleablized_blocks(false) + }, + ); let mut other_peer = other_peers.pop().unwrap(); let peer_1_indexer = BitcoinIndexer::new_unit_test(&peer.config.burnchain.working_dir); @@ -909,6 +929,8 @@ impl<'a> TestRPC<'a> { convo_2, canonical_tip: nakamoto_tip.index_block_hash(), consensus_hash: nakamoto_tip.consensus_hash.clone(), + tip_hash: nakamoto_tip.anchored_header.block_hash(), + tip_height: nakamoto_tip.stacks_block_height, microblock_tip_hash: BlockHeaderHash([0x00; 32]), mempool_txids: vec![], microblock_txids: vec![], diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 4f553efd21..c742bcf00b 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -220,6 +220,7 @@ impl MemPoolEventDispatcher for ProposalTestObserver { } #[test] +#[ignore] fn test_try_make_response() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); let test_observer = TestEventObserver::new(); @@ -281,6 +282,7 @@ fn test_try_make_response() { None, None, 8, + None, ) .unwrap(); @@ -376,12 +378,14 @@ fn test_try_make_response() { let observer = ProposalTestObserver::new(); let proposal_observer = Arc::clone(&observer.proposal_observer); + info!("Run requests with observer"); let mut responses = rpc_test.run_with_observer(requests, Some(&observer)); let response = responses.remove(0); // Wait for the results to be non-empty loop { + info!("Wait for results to be non-empty"); if proposal_observer .lock() .unwrap() diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 5cf32a8a56..7d45b39769 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -35,7 +35,7 @@ use crate::chainstate::burn::db::sortdb::{BlockHeaderCache, SortitionDB}; use crate::chainstate::burn::BlockSnapshot; use 
crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::StacksPublicKey; -use crate::core::{StacksEpoch, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3}; +use crate::core::{EpochList, StacksEpoch, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3}; use crate::monitoring; use crate::net::asn::ASEntry4; use crate::net::codec::*; @@ -393,7 +393,7 @@ pub struct ConversationP2P { pub reply_handles: VecDeque<ReplyHandleP2P>, /// system epochs - epochs: Vec<StacksEpoch>, + epochs: EpochList, } impl fmt::Display for ConversationP2P { @@ -569,14 +569,14 @@ impl ConversationP2P { conn_opts: &ConnectionOptions, outbound: bool, conn_id: usize, - epochs: Vec<StacksEpoch>, + epochs: EpochList, ) -> ConversationP2P { ConversationP2P { instantiated: get_epoch_time_secs(), - network_id: network_id, - version: version, + network_id, + version, connection: ConnectionP2P::new(StacksP2P::new(), conn_opts, None), - conn_id: conn_id, + conn_id, heartbeat: conn_opts.heartbeat, burnchain: burnchain.clone(), @@ -605,7 +605,7 @@ impl ConversationP2P { db_smart_contracts: vec![], - epochs: epochs, + epochs, } } @@ -750,10 +750,9 @@ impl ConversationP2P { /// Get the current epoch fn get_current_epoch(&self, cur_burn_height: u64) -> StacksEpoch { - let epoch_index = StacksEpoch::find_epoch(&self.epochs, cur_burn_height) - .unwrap_or_else(|| panic!("BUG: block {} is not in a known epoch", cur_burn_height)); - let epoch = self.epochs[epoch_index].clone(); - epoch + self.epochs + .epoch_at_height(cur_burn_height) + .unwrap_or_else(|| panic!("BUG: block {} is not in a known epoch", cur_burn_height)) } /// Determine whether or not a remote node has the proper epoch marker in its peer version @@ -3069,6 +3068,7 @@ mod test { use std::io::prelude::*; use std::io::{Read, Write}; use std::net::{SocketAddr, SocketAddrV4}; + use std::path::PathBuf; use clarity::vm::costs::ExecutionCost; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, SortitionId}; @@ -3080,6 +3080,7 @@ mod test { use super::*; use crate::burnchains::bitcoin::keys::BitcoinPublicKey; use crate::burnchains::burnchain::*; + use crate::burnchains::db::BurnchainDB; use crate::burnchains::*; use crate::chainstate::burn::db::sortdb::*; use crate::chainstate::burn::*; @@ -3123,6 +3124,8 @@ mod test { let peerdb_path = format!("{}/peers.sqlite", &test_path); let stackerdb_path = format!("{}/stackerdb.sqlite", &test_path); let chainstate_path = format!("{}/chainstate", &test_path); + let burnchain_db = + BurnchainDB::connect(&burnchain.get_burnchaindb_path(), burnchain, true).unwrap(); + let mut peerdb = PeerDB::connect( &peerdb_path, @@ -3314,12 +3317,14 @@ mod test { let atlasdb = AtlasDB::connect(atlas_config, &atlasdb_path, true).unwrap(); let stackerdbs = StackerDBs::connect(&stackerdb_path, true).unwrap(); let peerdb = PeerDB::open(&peerdb_path, true).unwrap(); + let burnchain_db = burnchain.open_burnchain_db(false).unwrap(); let local_peer = PeerDB::get_local_peer(peerdb.conn()).unwrap(); let network = PeerNetwork::new( peerdb, atlasdb, stackerdbs, + burnchain_db, local_peer, peer_version, burnchain.clone(), @@ -3331,7 +3336,7 @@ mod test { network } - fn testing_burnchain_config() -> Burnchain { + fn testing_burnchain_config(test_name: &str) -> Burnchain { let first_burn_hash = BurnchainHeaderHash::from_hex( "0000000000000000000000000000000000000000000000000000000000000000", ) .unwrap(); @@ -3342,7 +3347,7 @@ mod test { network_id: 0, chain_name: "bitcoin".to_string(), network_name: "testnet".to_string(), - working_dir: "/nope".to_string(), + working_dir:
format!("/tmp/stacks-test-databases-{}", test_name), consensus_hash_lifetime: 24, stable_confirmations: 7, first_block_height: 12300, @@ -3366,8 +3371,6 @@ mod test { let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); - let burnchain = testing_burnchain_config(); - let mut chain_view_1 = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -3397,10 +3400,13 @@ mod test { &peer_2_rc_consensus_hash ); + let burnchain_1 = testing_burnchain_config(&test_name_1); + let burnchain_2 = testing_burnchain_config(&test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -3411,7 +3417,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -3422,7 +3428,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -3431,7 +3437,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -3445,7 +3451,7 @@ mod test { peerdb_1 .update_local_peer( 0x9abcdef0, - burnchain.network_id, + burnchain_1.network_id, local_peer_1.data_url, local_peer_1.port, &[ @@ -3458,7 +3464,7 @@ mod test { peerdb_2 .update_local_peer( 0x9abcdef0, - burnchain.network_id, + burnchain_2.network_id, local_peer_2.data_url, local_peer_2.port, &[ @@ -3490,7 +3496,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -3500,7 +3506,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -3708,8 +3714,6 @@ mod test { let socketaddr_1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let socketaddr_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8081); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -3723,10 +3727,13 @@ mod test { let test_name_1 = "convo_handshake_accept_1"; let test_name_2 = "convo_handshake_accept_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -3737,7 +3744,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -3748,7 +3755,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -3757,7 +3764,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -3771,7 +3778,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -3781,7 +3788,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - 
&burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -3887,8 +3894,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -3902,10 +3907,13 @@ mod test { let test_name_1 = "convo_handshake_reject_1"; let test_name_2 = "convo_handshake_reject_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -3916,7 +3924,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -3927,7 +3935,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -3936,7 +3944,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -3950,7 +3958,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -3960,7 +3968,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4026,8 +4034,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4046,10 +4052,13 @@ mod test { let test_name_1 = "convo_handshake_badsignature_1"; let test_name_2 = "convo_handshake_badsignature_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4060,7 +4069,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4071,7 +4080,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4080,7 +4089,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4094,7 +4103,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4104,7 +4113,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4169,8 +4178,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4189,10 +4196,13 @@ mod test { let test_name_1 = "convo_handshake_badpeeraddress_1"; let test_name_2 = "convo_handshake_badpeeraddress_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - 
&burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4203,7 +4213,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4214,7 +4224,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4223,7 +4233,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4237,7 +4247,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4247,7 +4257,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4330,8 +4340,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4345,10 +4353,13 @@ mod test { let test_name_1 = "convo_handshake_update_key_1"; let test_name_2 = "convo_handshake_update_key_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4359,7 +4370,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4370,7 +4381,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4379,7 +4390,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4393,7 +4404,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4403,7 +4414,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4523,8 +4534,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4543,10 +4552,13 @@ mod test { let test_name_1 = "convo_handshake_self_1"; let test_name_2 = "convo_handshake_self_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4557,7 +4569,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4568,7 +4580,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4577,7 +4589,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4591,7 +4603,7 @@ mod test { let mut convo_1 = 
ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4601,7 +4613,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4666,8 +4678,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4686,10 +4696,13 @@ mod test { let test_name_1 = "convo_ping_1"; let test_name_2 = "convo_ping_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4700,7 +4713,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4711,7 +4724,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4720,7 +4733,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4734,7 +4747,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4744,7 +4757,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -4841,8 +4854,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -4861,10 +4872,13 @@ mod test { let test_name_1 = "convo_handshake_ping_loop_1"; let test_name_2 = "convo_handshake_ping_loop_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -4875,7 +4889,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -4886,7 +4900,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -4895,7 +4909,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -4909,7 +4923,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -4919,7 +4933,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -5067,8 +5081,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5087,10 +5099,13 @@ mod test { let test_name_1 = "convo_nack_unsolicited_1"; let test_name_2 = "convo_nack_unsolicited_2"; + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = 
testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -5101,7 +5116,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -5112,7 +5127,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5121,7 +5136,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -5135,7 +5150,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -5145,7 +5160,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -5216,8 +5231,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5235,10 +5248,14 @@ mod test { let test_name_1 = "convo_ignore_unsolicited_handshake_1"; let test_name_2 = "convo_ignore_unsolicited_handshake_2"; + + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -5249,7 +5266,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -5260,7 +5277,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5269,7 +5286,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -5283,7 +5300,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -5293,7 +5310,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -5390,8 +5407,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12331, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5404,10 +5419,14 @@ mod test { let test_name_1 = "convo_handshake_getblocksinv_1"; let test_name_2 = "convo_handshake_getblocksinv_2"; + + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -5418,7 +5437,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -5429,7 +5448,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5438,7 +5457,7 @@ mod 
test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -5452,7 +5471,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -5462,7 +5481,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -5667,8 +5686,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12331, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5681,10 +5698,14 @@ mod test { let test_name_1 = "convo_handshake_getnakamotoinv_1"; let test_name_2 = "convo_handshake_getnakamotoinv_2"; + + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12350, "http://peer1.com".into(), @@ -5695,7 +5716,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12351, "http://peer2.com".into(), @@ -5706,7 +5727,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5715,7 +5736,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -5729,7 +5750,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -5739,7 +5760,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -5940,8 +5961,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -5959,10 +5978,14 @@ mod test { let test_name_1 = "convo_natpunch_1"; let test_name_2 = "convo_natpunch_2"; + + let burnchain_1 = testing_burnchain_config(test_name_1); + let burnchain_2 = testing_burnchain_config(test_name_2); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, mut chainstate_1) = make_test_chain_dbs( test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, 12352, "http://peer1.com".into(), @@ -5973,7 +5996,7 @@ mod test { let (mut peerdb_2, mut sortdb_2, stackerdbs_2, pox_id_2, mut chainstate_2) = make_test_chain_dbs( test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, 12353, "http://peer2.com".into(), @@ -5984,7 +6007,7 @@ mod test { let mut net_1 = db_setup( &test_name_1, - &burnchain, + &burnchain_1, 0x9abcdef0, &mut peerdb_1, &mut sortdb_1, @@ -5993,7 +6016,7 @@ mod test { ); let mut net_2 = db_setup( &test_name_2, - &burnchain, + &burnchain_2, 0x9abcdef0, &mut peerdb_2, &mut sortdb_2, @@ -6007,7 +6030,7 @@ mod test { let mut convo_1 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_1, &socketaddr_2, &conn_opts, true, @@ -6017,7 +6040,7 @@ mod test { let mut convo_2 = ConversationP2P::new( 123, 456, - &burnchain, + &burnchain_2, &socketaddr_1, &conn_opts, true, @@ -6081,8 +6104,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -6094,6 +6115,8 @@ mod test { 
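All of the substitutions in these test hunks serve one refactor: `testing_burnchain_config()` now takes the test name, so the two simulated peers in each test get distinct burnchain configurations (`burnchain_1`/`burnchain_2`) instead of sharing one `burnchain` binding. A minimal sketch of the idea, assuming a hypothetical working-directory-keyed helper with stand-in types rather than the repository's actual `Burnchain` constructor:

use std::path::PathBuf;

/// Stand-in for stackslib's Burnchain; only the field relevant to the
/// refactor is shown here.
struct Burnchain {
    working_dir: PathBuf,
}

/// Hypothetical shape of the updated helper: each test (and each simulated
/// peer within a test) gets its own working directory, so concurrently
/// running tests cannot clobber each other's on-disk DBs.
fn testing_burnchain_config(test_name: &str) -> Burnchain {
    Burnchain {
        working_dir: PathBuf::from(format!("/tmp/stacks-net-tests/{test_name}")),
    }
}

fn main() {
    let burnchain_1 = testing_burnchain_config("convo_handshake_accept_1");
    let burnchain_2 = testing_burnchain_config("convo_handshake_accept_2");
    assert_ne!(burnchain_1.working_dir, burnchain_2.working_dir);
}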
chain_view.make_test_data(); let test_name_1 = "convo_is_preamble_valid"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, chainstate_1) = make_test_chain_dbs( test_name_1, @@ -6259,9 +6282,9 @@ mod test { { // convo thinks its epoch 2.05 let epochs = StacksEpoch::unit_test_2_05(chain_view.burn_block_height - 4); - let cur_epoch_idx = - StacksEpoch::find_epoch(&epochs, chain_view.burn_block_height).unwrap(); - let cur_epoch = epochs[cur_epoch_idx].clone(); + let cur_epoch = epochs + .epoch_at_height(chain_view.burn_block_height) + .unwrap(); assert_eq!(cur_epoch.epoch_id, StacksEpochId::Epoch2_05); eprintln!( @@ -6362,7 +6385,7 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); + let burnchain = testing_burnchain_config("unused"); let mut chain_view = BurnchainView { burn_block_height: 12348, @@ -6748,8 +6771,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -6761,6 +6782,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "sign_relay_forward_message_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -6866,8 +6889,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -6879,6 +6900,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "sign_and_forward_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -6933,8 +6956,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -6946,6 +6967,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "validate_block_push_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -7067,8 +7090,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -7080,6 +7101,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "validate_transaction_push_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -7201,8 +7224,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -7214,6 +7235,8 @@ mod test { chain_view.make_test_data(); let test_name_1 = "validate_microblocks_push_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, @@ -7335,8 +7358,6 @@ mod test { ) .unwrap(); - let burnchain = testing_burnchain_config(); - let mut chain_view = BurnchainView { burn_block_height: 12348, burn_block_hash: BurnchainHeaderHash([0x11; 32]), @@ -7348,6 +7369,8 @@ mod test { 
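The `convo_is_preamble_valid` hunk above also replaces a manual `StacksEpoch::find_epoch` index lookup plus clone with a single `epoch_at_height()` accessor. A sketch of what such an accessor does over a simplified epoch list (types here are stand-ins, not the crate's `StacksEpoch`):

#[derive(Clone, Debug, PartialEq)]
struct Epoch {
    id: u32,           // stand-in for StacksEpochId
    start_height: u64, // first burn height in the epoch (inclusive)
    end_height: u64,   // first burn height past the epoch (exclusive)
}

/// Return the epoch whose [start_height, end_height) range contains `height`.
fn epoch_at_height(epochs: &[Epoch], height: u64) -> Option<Epoch> {
    epochs
        .iter()
        .find(|e| e.start_height <= height && height < e.end_height)
        .cloned()
}

fn main() {
    let epochs = vec![
        Epoch { id: 200, start_height: 0, end_height: 100 },
        Epoch { id: 205, start_height: 100, end_height: u64::MAX },
    ];
    // One call replaces find-the-index-then-clone-the-entry.
    assert_eq!(epoch_at_height(&epochs, 12344).unwrap().id, 205);
}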
chain_view.make_test_data(); let test_name_1 = "validate_stackerdb_push_1"; + let burnchain = testing_burnchain_config(test_name_1); + let (mut peerdb_1, mut sortdb_1, stackerdbs_1, pox_id_1, _) = make_test_chain_dbs( test_name_1, &burnchain, diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 85fe9d7494..4eeec0daaf 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -474,6 +474,8 @@ pub struct ConnectionOptions { /// the reward cycle in which Nakamoto activates, and thus needs to run both the epoch /// 2.x and Nakamoto state machines. pub force_nakamoto_epoch_transition: bool, + /// Reject blocks that were pushed + pub reject_blocks_pushed: bool, // test facilitation /// Do not require that an unsolicited message originate from an authenticated, connected @@ -583,6 +585,7 @@ impl std::default::Default for ConnectionOptions { disable_stackerdb_sync: false, force_disconnect_interval: None, force_nakamoto_epoch_transition: false, + reject_blocks_pushed: false, // no test facilitations on by default test_disable_unsolicited_message_authentication: false, diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 02ed8b9419..4c509ed5c1 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -68,6 +68,9 @@ use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; +/// How often to check for unconfirmed tenures +const CHECK_UNCONFIRMED_TENURES_MS: u128 = 1_000; + /// The overall downloader can operate in one of two states: /// * it's doing IBD, in which case it's downloading tenures using neighbor inventories and /// the start/end block ID hashes obtained from block-commits. This works up until the last two @@ -118,6 +121,10 @@ pub struct NakamotoDownloadStateMachine { pub(super) neighbor_rpc: NeighborRPC, /// Nakamoto chain tip nakamoto_tip: StacksBlockId, + /// do we need to fetch unconfirmed tenures? + fetch_unconfirmed_tenures: bool, + /// last time an unconfirmed tenures was checked + last_unconfirmed_download_check_ms: u128, /// last time an unconfirmed downloader was run last_unconfirmed_download_run_ms: u128, } @@ -139,6 +146,8 @@ impl NakamotoDownloadStateMachine { unconfirmed_tenure_downloads: HashMap::new(), neighbor_rpc: NeighborRPC::new(), nakamoto_tip, + fetch_unconfirmed_tenures: false, + last_unconfirmed_download_check_ms: 0, last_unconfirmed_download_run_ms: 0, } } @@ -465,142 +474,6 @@ impl NakamotoDownloadStateMachine { Ok(()) } - /// Determine if the set of `TenureStartEnd`s represents available but unfetched data. Used to - /// determine whether or not to update the set of wanted tenures -- we don't want to skip - /// fetching wanted tenures if they're still available! - pub(crate) fn have_unprocessed_tenures<'a>( - first_nakamoto_rc: u64, - completed_tenures: &HashSet, - prev_wanted_tenures: &[WantedTenure], - tenure_block_ids: &HashMap, - pox_constants: &PoxConstants, - first_burn_height: u64, - inventory_iter: impl Iterator, - ) -> bool { - if prev_wanted_tenures.is_empty() { - debug!("prev_wanted_tenures is empty, so we have unprocessed tenures"); - return true; - } - - // the anchor block for prev_wanted_tenures must not only be processed, but also we have to - // have seen an inventory message from the subsequent reward cycle. 
If we can see - // inventory messages for the reward cycle after `prev_wanted_rc`, then the former will be - // true - let prev_wanted_rc = prev_wanted_tenures - .last() - .map(|wt| { - downloader_block_height_to_reward_cycle( - pox_constants, - first_burn_height, - wt.burn_height, - ) - .expect("FATAL: wanted tenure before system start") - }) - .unwrap_or(u64::MAX); - - let cur_wanted_rc = prev_wanted_rc.saturating_add(1); - - debug!( - "have_unprocessed_tenures: prev_wanted_rc = {}, cur_wanted_rc = {}", - prev_wanted_rc, cur_wanted_rc - ); - - let mut has_prev_inv = false; - let mut has_cur_inv = false; - let mut num_invs = 0; - for inv in inventory_iter { - num_invs += 1; - if prev_wanted_rc < first_nakamoto_rc { - // assume the epoch 2.x inventory has this - has_prev_inv = true; - } else if inv.tenures_inv.get(&prev_wanted_rc).is_some() { - has_prev_inv = true; - } - - if cur_wanted_rc < first_nakamoto_rc { - // assume the epoch 2.x inventory has this - has_cur_inv = true; - } else if inv.tenures_inv.get(&cur_wanted_rc).is_some() { - has_cur_inv = true; - } - } - - if !has_prev_inv || !has_cur_inv { - debug!("No peer has an inventory for either the previous ({}: available = {}) or current ({}: available = {}) wanted tenures. Total inventories: {}", prev_wanted_rc, has_prev_inv, cur_wanted_rc, has_cur_inv, num_invs); - return true; - } - - // the state machine updates `tenure_block_ids` _after_ `wanted_tenures`, so verify that - // this isn't a stale `tenure_block_ids` by checking that it contains at least one block in - // the prev_wanted_rc and at least one in the cur_wanted_rc - let mut has_prev_rc_block = false; - let mut has_cur_rc_block = false; - let mut available_considered = 0; - for (_naddr, available) in tenure_block_ids.iter() { - available_considered += available.len(); - debug!("Consider available tenures from {}", _naddr); - for (_ch, tenure_info) in available.iter() { - debug!("Consider tenure info for {}: {:?}", _ch, tenure_info); - if tenure_info.start_reward_cycle == prev_wanted_rc - || tenure_info.end_reward_cycle == prev_wanted_rc - { - has_prev_rc_block = true; - debug!( - "Consider tenure info for {}: have a tenure in prev reward cycle {}", - _ch, prev_wanted_rc - ); - } - if tenure_info.start_reward_cycle == cur_wanted_rc - || tenure_info.end_reward_cycle == cur_wanted_rc - { - has_cur_rc_block = true; - debug!( - "Consider tenure info for {}: have a tenure in cur reward cycle {}", - _ch, cur_wanted_rc - ); - } - } - } - - if available_considered > 0 - && ((prev_wanted_rc >= first_nakamoto_rc && !has_prev_rc_block) - || (cur_wanted_rc >= first_nakamoto_rc && !has_cur_rc_block)) - { - debug!( - "tenure_block_ids stale: missing representation in reward cycles {} ({}) and {} ({})", - prev_wanted_rc, - has_prev_rc_block, - cur_wanted_rc, - has_cur_rc_block, - ); - return true; - } - - let mut ret = false; - for (_naddr, available) in tenure_block_ids.iter() { - for wt in prev_wanted_tenures.iter() { - let Some(tenure_info) = available.get(&wt.tenure_id_consensus_hash) else { - continue; - }; - if completed_tenures.contains(&tenure_info.tenure_id_consensus_hash) { - // this check is necessary because the check for .processed requires that a - // child tenure block has been processed, which isn't guaranteed at a reward - // cycle boundary - debug!("Tenure {:?} has been fully downloaded", &tenure_info); - continue; - } - if !tenure_info.processed { - debug!( - "Tenure {:?} is available from {} but not processed", - &tenure_info, &_naddr - ); - ret = true; - } - } - } - 
ret - } - /// Update the state machine's wanted tenures and processed tenures, if it's time to do so. /// This will only happen when the sortition DB has finished processing a reward cycle of /// tenures when in IBD mode, _OR_ when the sortition tip advances when in steady-state mode. @@ -612,8 +485,7 @@ impl NakamotoDownloadStateMachine { /// cycle boundaries, where the sortition DB is about to begin processing a new reward cycle. /// The list of wanted tenures for the current reward cycle will be saved as /// `self.prev_wanted_tenures`, and the set of wanted tenures for the next reward cycle - /// will be stored to `self.wanted_tenures`. It will only update these two lists if it is safe - /// to do so, as determined by `have_unprocessed_tenures()`. + /// will be stored to `self.wanted_tenures`. /// /// In the second case (i.e. not a reward cycle boundary), this function will load up _new_ /// wanted tenure data and append it to `self.wanted_tenures` via @@ -1312,6 +1184,16 @@ impl NakamotoDownloadStateMachine { continue; } + let _ = downloader + .try_advance_from_chainstate(chainstate) + .map_err(|e| { + warn!( + "Failed to advance downloader in state {} for {}: {:?}", + &downloader.state, &downloader.naddr, &e + ); + e + }); + debug!( "Send request to {} for tenure {:?} (state {})", &naddr, @@ -1355,6 +1237,7 @@ impl NakamotoDownloadStateMachine { ) { Ok(blocks_opt) => blocks_opt, Err(NetError::StaleView) => { + neighbor_rpc.add_dead(network, &naddr); continue; } Err(e) => { @@ -1428,13 +1311,16 @@ impl NakamotoDownloadStateMachine { fn download_confirmed_tenures( &mut self, network: &mut PeerNetwork, + chainstate: &mut StacksChainState, max_count: usize, ) -> HashMap> { // queue up more downloaders self.update_tenure_downloaders(max_count, &network.current_reward_sets); // run all downloaders - let new_blocks = self.tenure_downloads.run(network, &mut self.neighbor_rpc); + let new_blocks = self + .tenure_downloads + .run(network, &mut self.neighbor_rpc, chainstate); new_blocks } @@ -1445,7 +1331,7 @@ impl NakamotoDownloadStateMachine { &mut self, network: &mut PeerNetwork, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, highest_processed_block_id: Option, ) -> HashMap> { // queue up more downloaders @@ -1467,7 +1353,7 @@ impl NakamotoDownloadStateMachine { // already downloaded all confirmed tenures), so there's no risk of clobberring any other // in-flight requests. 
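With `have_unprocessed_tenures()` removed, the wanted-tenure update is no longer gated on that scan; instead, in the `run_downloads` hunk that follows, the relatively expensive `need_unconfirmed_tenures()` check is rate-limited via `last_unconfirmed_download_check_ms` and `CHECK_UNCONFIRMED_TENURES_MS`, and the downloader simply skips the unconfirmed pass between checks. A distilled sketch of that throttle, with a stand-in clock:

use std::time::{SystemTime, UNIX_EPOCH};

/// Stand-in for the node's get_epoch_time_ms().
fn get_epoch_time_ms() -> u128 {
    SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis()
}

const CHECK_INTERVAL_MS: u128 = 1_000;

struct Throttled {
    last_check_ms: u128,
}

impl Throttled {
    /// Run `expensive_check` at most once per CHECK_INTERVAL_MS; between
    /// runs, report `false` so the caller skips the work this pass.
    fn should_fetch(&mut self, expensive_check: impl FnOnce() -> bool) -> bool {
        if self.last_check_ms.saturating_add(CHECK_INTERVAL_MS) > get_epoch_time_ms() {
            return false;
        }
        let do_fetch = expensive_check();
        self.last_check_ms = get_epoch_time_ms();
        do_fetch
    }
}

fn main() {
    let mut t = Throttled { last_check_ms: 0 };
    assert!(t.should_fetch(|| true));  // first call runs the check
    assert!(!t.should_fetch(|| true)); // immediate retry is suppressed
}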
let new_confirmed_blocks = if self.tenure_downloads.inflight() > 0 { - self.download_confirmed_tenures(network, 0) + self.download_confirmed_tenures(network, chainstate, 0) } else { HashMap::new() }; @@ -1542,19 +1428,22 @@ impl NakamotoDownloadStateMachine { burnchain_height: u64, network: &mut PeerNetwork, sortdb: &SortitionDB, - chainstate: &StacksChainState, + chainstate: &mut StacksChainState, ibd: bool, ) -> HashMap> { - debug!("NakamotoDownloadStateMachine in state {}", &self.state); + debug!( + "run_downloads: burnchain_height={}, network.burnchain_tip.block_height={}, state={}", + burnchain_height, network.burnchain_tip.block_height, &self.state; + "has_network_inventories" => network.inv_state_nakamoto.is_some(), + "next_unconfirmed_check" => self.last_unconfirmed_download_check_ms.saturating_add(CHECK_UNCONFIRMED_TENURES_MS) / 1000, + "timestamp_ms" => get_epoch_time_ms(), + ); + let Some(invs) = network.inv_state_nakamoto.as_ref() else { // nothing to do - debug!("No network inventories"); return HashMap::new(); }; - debug!( - "run_downloads: burnchain_height={}, network.burnchain_tip.block_height={}, state={}", - burnchain_height, network.burnchain_tip.block_height, &self.state - ); + self.update_available_tenures( &invs.inventories, &sortdb.pox_constants, @@ -1563,24 +1452,35 @@ impl NakamotoDownloadStateMachine { ); // check this now, since we mutate self.available - let need_unconfirmed_tenures = Self::need_unconfirmed_tenures( - burnchain_height, - &network.burnchain_tip, - &self.wanted_tenures, - self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), - &self.tenure_block_ids, - &self.available_tenures, - ); + self.fetch_unconfirmed_tenures = if self + .last_unconfirmed_download_check_ms + .saturating_add(CHECK_UNCONFIRMED_TENURES_MS) + > get_epoch_time_ms() + { + false + } else { + let do_fetch = Self::need_unconfirmed_tenures( + burnchain_height, + &network.burnchain_tip, + &self.wanted_tenures, + self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), + &self.tenure_block_ids, + &self.available_tenures, + ); + self.last_unconfirmed_download_check_ms = get_epoch_time_ms(); + do_fetch + }; match self.state { NakamotoDownloadState::Confirmed => { let new_blocks = self.download_confirmed_tenures( network, + chainstate, usize::try_from(network.get_connection_opts().max_inflight_blocks) .expect("FATAL: max_inflight_blocks exceeds usize::MAX"), ); - if self.tenure_downloads.is_empty() && need_unconfirmed_tenures { + if self.tenure_downloads.is_empty() && self.fetch_unconfirmed_tenures { debug!( "Transition from {} to {}", &self.state, @@ -1625,7 +1525,7 @@ impl NakamotoDownloadStateMachine { } else if self.unconfirmed_tenure_downloads.is_empty() && self.unconfirmed_tenure_download_schedule.is_empty() { - if need_unconfirmed_tenures { + if self.fetch_unconfirmed_tenures { // do this again self.unconfirmed_tenure_download_schedule = Self::make_unconfirmed_tenure_download_schedule( diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index 53f9105156..0f4e3d53cb 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -98,6 +98,12 @@ impl WantedTenure { pub struct TenureStartEnd { /// Consensus hash that identifies the start of the tenure pub tenure_id_consensus_hash: ConsensusHash, + /// Consensus hash that identifies the snapshot with the start block ID + pub start_block_snapshot_consensus_hash: ConsensusHash, + /// Consensus hash that identifies the snapshot with the end 
block ID + pub end_block_snapshot_consensus_hash: ConsensusHash, + /// Burnchain block height of tenure ID consensus hash + pub tenure_id_burn_block_height: u64, /// Tenure-start block ID pub start_block_id: StacksBlockId, /// Last block ID @@ -119,7 +125,10 @@ pub type AvailableTenures = HashMap; impl TenureStartEnd { pub fn new( tenure_id_consensus_hash: ConsensusHash, + tenure_id_burn_block_height: u64, + start_block_snapshot_consensus_hash: ConsensusHash, start_block_id: StacksBlockId, + end_block_snapshot_consensus_hash: ConsensusHash, end_block_id: StacksBlockId, start_reward_cycle: u64, end_reward_cycle: u64, @@ -127,7 +136,10 @@ impl TenureStartEnd { ) -> Self { Self { tenure_id_consensus_hash, + tenure_id_burn_block_height, + start_block_snapshot_consensus_hash, start_block_id, + end_block_snapshot_consensus_hash, end_block_id, start_reward_cycle, end_reward_cycle, @@ -214,7 +226,10 @@ impl TenureStartEnd { let tenure_start_end = TenureStartEnd::new( wt.tenure_id_consensus_hash.clone(), + wt.burn_height, + wt_start.tenure_id_consensus_hash.clone(), wt_start.winning_block_id.clone(), + wt_end.tenure_id_consensus_hash.clone(), wt_end.winning_block_id.clone(), rc, rc, @@ -322,7 +337,10 @@ impl TenureStartEnd { let mut tenure_start_end = TenureStartEnd::new( wt.tenure_id_consensus_hash.clone(), + wt.burn_height, + wt_start.tenure_id_consensus_hash.clone(), wt_start.winning_block_id.clone(), + wt_end.tenure_id_consensus_hash.clone(), wt_end.winning_block_id.clone(), rc, pox_constants diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 4c5efaccdd..e2716e8252 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -43,7 +43,7 @@ use crate::chainstate::nakamoto::{ use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, + Error as chainstate_error, StacksBlockHeader, TenureChangePayload, TransactionPayload, }; use crate::core::{ EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, @@ -119,9 +119,13 @@ impl fmt::Display for NakamotoTenureDownloadState { pub struct NakamotoTenureDownloader { /// Consensus hash that identifies this tenure pub tenure_id_consensus_hash: ConsensusHash, + /// Consensus hash that identifies the snapshot from whence we obtained tenure_start_block_id + pub start_block_snapshot_consensus_hash: ConsensusHash, /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and /// sortition DB. pub tenure_start_block_id: StacksBlockId, + /// Consensus hash that identifies the snapshot from whence we obtained tenure_end_block_id + pub end_block_snapshot_consensus_hash: ConsensusHash, /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID /// for some other tenure). Learned from the inventory state machine and sortition DB. 
pub tenure_end_block_id: StacksBlockId, @@ -150,19 +154,27 @@ pub struct NakamotoTenureDownloader { impl NakamotoTenureDownloader { pub fn new( tenure_id_consensus_hash: ConsensusHash, + start_block_snapshot_consensus_hash: ConsensusHash, tenure_start_block_id: StacksBlockId, + end_block_snapshot_consensus_hash: ConsensusHash, tenure_end_block_id: StacksBlockId, naddr: NeighborAddress, start_signer_keys: RewardSet, end_signer_keys: RewardSet, ) -> Self { debug!( - "Instantiate downloader to {} for tenure {}: {}-{}", - &naddr, &tenure_id_consensus_hash, &tenure_start_block_id, &tenure_end_block_id, + "Instantiate downloader to {}-{} for tenure {}: {}-{}", + &naddr, + &tenure_id_consensus_hash, + &start_block_snapshot_consensus_hash, + &tenure_start_block_id, + &tenure_end_block_id, ); Self { tenure_id_consensus_hash, + start_block_snapshot_consensus_hash, tenure_start_block_id, + end_block_snapshot_consensus_hash, tenure_end_block_id, naddr, start_signer_keys, @@ -270,7 +282,9 @@ impl NakamotoTenureDownloader { return Err(NetError::InvalidState); }; - if self.tenure_end_block_id != tenure_end_block.header.block_id() { + if self.tenure_end_block_id != tenure_end_block.header.block_id() + && self.tenure_end_block_id != StacksBlockId([0x00; 32]) + { // not the block we asked for warn!("Invalid tenure-end block: unexpected"; "tenure_id" => %self.tenure_id_consensus_hash, @@ -541,6 +555,177 @@ impl NakamotoTenureDownloader { Ok(Some(request)) } + /// Advance the state of the downloader from chainstate, if possible. + /// For example, a tenure-start or tenure-end block may have been pushed to us already (or they + /// may be shadow blocks) + pub fn try_advance_from_chainstate( + &mut self, + chainstate: &mut StacksChainState, + ) -> Result<(), NetError> { + loop { + match self.state { + NakamotoTenureDownloadState::GetTenureStartBlock( + start_block_id, + start_request_time, + ) => { + if chainstate + .nakamoto_blocks_db() + .is_shadow_tenure(&self.start_block_snapshot_consensus_hash)? + { + debug!( + "Tenure {} start-block confirmed by shadow tenure {}", + &self.tenure_id_consensus_hash, + &self.start_block_snapshot_consensus_hash + ); + let Some(shadow_block) = chainstate + .nakamoto_blocks_db() + .get_shadow_tenure_start_block( + &self.start_block_snapshot_consensus_hash, + )? + else { + warn!( + "No tenure-start block for shadow tenure {}", + &self.start_block_snapshot_consensus_hash + ); + break; + }; + + // the coinbase of a tenure-start block of a shadow tenure contains the + // block-id of the parent tenure's start block (i.e. the information that + // would have been gleaned from a block-commit, if there was one). + let Some(shadow_coinbase) = shadow_block.get_coinbase_tx() else { + warn!("Shadow block {} has no coinbase", &shadow_block.block_id()); + break; + }; + + let TransactionPayload::Coinbase(coinbase_payload, ..) = + &shadow_coinbase.payload + else { + warn!( + "Shadow block {} coinbase tx is not a Coinbase", + &shadow_block.block_id() + ); + break; + }; + + let tenure_start_block_id = StacksBlockId(coinbase_payload.0.clone()); + + info!( + "Tenure {} starts at shadow tenure-start {}, not {}", + &self.tenure_id_consensus_hash, &tenure_start_block_id, &start_block_id + ); + self.tenure_start_block_id = tenure_start_block_id.clone(); + self.state = NakamotoTenureDownloadState::GetTenureStartBlock( + tenure_start_block_id, + start_request_time, + ); + if let Some((tenure_start_block, _sz)) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&self.tenure_start_block_id)? 
+ { + // normal block on disk + self.try_accept_tenure_start_block(tenure_start_block)?; + } + } else if let Some((tenure_start_block, _sz)) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&start_block_id)? + { + // we have downloaded this block already + self.try_accept_tenure_start_block(tenure_start_block)?; + } else { + break; + } + if let NakamotoTenureDownloadState::GetTenureStartBlock(..) = &self.state { + break; + } + } + NakamotoTenureDownloadState::GetTenureEndBlock( + end_block_id, + start_request_time, + ) => { + if chainstate + .nakamoto_blocks_db() + .is_shadow_tenure(&self.end_block_snapshot_consensus_hash)? + { + debug!( + "Tenure {} end-block confirmed by shadow tenure {}", + &self.tenure_id_consensus_hash, &self.end_block_snapshot_consensus_hash + ); + let Some(shadow_block) = chainstate + .nakamoto_blocks_db() + .get_shadow_tenure_start_block( + &self.end_block_snapshot_consensus_hash, + )? + else { + warn!( + "No tenure-start block for shadow tenure {}", + &self.end_block_snapshot_consensus_hash + ); + break; + }; + + // the coinbase of a tenure-start block of a shadow tenure contains the + // block-id of the parent tenure's start block (i.e. the information that + // would have been gleaned from a block-commit, if there was one). + let Some(shadow_coinbase) = shadow_block.get_coinbase_tx() else { + warn!("Shadow block {} has no coinbase", &shadow_block.block_id()); + break; + }; + + let TransactionPayload::Coinbase(coinbase_payload, ..) = + &shadow_coinbase.payload + else { + warn!( + "Shadow block {} coinbase tx is not a Coinbase", + &shadow_block.block_id() + ); + break; + }; + + let tenure_end_block_id = StacksBlockId(coinbase_payload.0.clone()); + + info!( + "Tenure {} ends at shadow tenure-start {}, not {}", + &self.tenure_id_consensus_hash, &tenure_end_block_id, &end_block_id + ); + self.tenure_end_block_id = tenure_end_block_id.clone(); + self.state = NakamotoTenureDownloadState::GetTenureEndBlock( + tenure_end_block_id, + start_request_time, + ); + if let Some((tenure_end_block, _sz)) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&self.tenure_end_block_id)? + { + // normal block on disk + self.try_accept_tenure_end_block(&tenure_end_block)?; + } + } else if let Some((tenure_end_block, _sz)) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&end_block_id)? + { + // normal block on disk + self.try_accept_tenure_end_block(&tenure_end_block)?; + } else { + break; + }; + if let NakamotoTenureDownloadState::GetTenureEndBlock(..) = &self.state { + break; + } + } + NakamotoTenureDownloadState::GetTenureBlocks(..) => { + // TODO: look at the chainstate and find out what we don't have to download + // TODO: skip shadow tenures + break; + } + NakamotoTenureDownloadState::Done => { + break; + } + } + } + Ok(()) + } + /// Begin the next download request for this state machine. The request will be sent to the /// data URL corresponding to self.naddr. /// Returns Ok(true) if we sent the request, or there's already an in-flight request. 
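The key idea in `try_advance_from_chainstate` above is that a shadow tenure's tenure-start block smuggles the parent tenure's start block ID inside its coinbase payload, standing in for the block-commit a normal tenure would have. A reduced sketch of that extraction step, assuming a coinbase payload that wraps a 32-byte buffer as in stackslib (the other types are stand-ins):

/// Stand-ins mirroring the shapes used above.
struct CoinbasePayload([u8; 32]);

enum TransactionPayload {
    Coinbase(CoinbasePayload),
    Other,
}

#[derive(Debug, PartialEq)]
struct StacksBlockId([u8; 32]);

/// Recover the parent tenure's start block ID from a shadow tenure-start
/// block's coinbase, or None if the coinbase is missing or not a Coinbase.
fn parent_tenure_start_id(coinbase: Option<&TransactionPayload>) -> Option<StacksBlockId> {
    match coinbase? {
        TransactionPayload::Coinbase(payload) => Some(StacksBlockId(payload.0)),
        TransactionPayload::Other => None,
    }
}

fn main() {
    let payload = TransactionPayload::Coinbase(CoinbasePayload([0x22; 32]));
    assert_eq!(
        parent_tenure_start_id(Some(&payload)),
        Some(StacksBlockId([0x22; 32]))
    );
    assert_eq!(parent_tenure_start_id(Some(&TransactionPayload::Other)), None);
}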
The diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index 49b32c2634..08714f5cbf 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -67,6 +67,35 @@ use crate::net::server::HttpPeer; use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; use crate::util_lib::db::{DBConn, Error as DBError}; +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub(crate) struct CompletedTenure { + tenure_id: ConsensusHash, + start_block: StacksBlockId, + end_block: StacksBlockId, +} + +impl From<&TenureStartEnd> for CompletedTenure { + fn from(tse: &TenureStartEnd) -> Self { + Self { + tenure_id: tse.tenure_id_consensus_hash.clone(), + start_block: tse.start_block_id.clone(), + end_block: tse.end_block_id.clone(), + } + } +} + +impl From<&mut NakamotoTenureDownloader> for CompletedTenure { + fn from(ntd: &mut NakamotoTenureDownloader) -> Self { + Self { + tenure_id: ntd.tenure_id_consensus_hash, + start_block: ntd.tenure_start_block_id, + end_block: ntd.tenure_end_block_id, + } + } +} + +pub const PEER_DEPRIORITIZATION_TIME_SECS: u64 = 60; + /// A set of confirmed downloader state machines assigned to one or more neighbors. The block /// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure /// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer @@ -83,7 +112,14 @@ pub struct NakamotoTenureDownloaderSet { pub(crate) peers: HashMap, /// The set of tenures that have been successfully downloaded (but possibly not yet stored or /// processed) - pub(crate) completed_tenures: HashSet, + pub(crate) completed_tenures: HashSet, + /// Number of times a tenure download was attempted + pub(crate) attempted_tenures: HashMap, + /// Number of times a tenure download failed + pub(crate) attempt_failed_tenures: HashMap, + /// Peers that should be deprioritized because they're dead (maps to when they can be used + /// again) + pub(crate) deprioritized_peers: HashMap, } impl NakamotoTenureDownloaderSet { @@ -92,15 +128,51 @@ impl NakamotoTenureDownloaderSet { downloaders: vec![], peers: HashMap::new(), completed_tenures: HashSet::new(), + attempted_tenures: HashMap::new(), + attempt_failed_tenures: HashMap::new(), + deprioritized_peers: HashMap::new(), } } + /// Mark a tenure as having failed to download. + /// Implemented statically to appease the borrow checker. + fn mark_failure(attempt_failed_tenures: &mut HashMap, ch: &ConsensusHash) { + if let Some(failures) = attempt_failed_tenures.get_mut(ch) { + *failures = (*failures).saturating_add(1); + } else { + attempt_failed_tenures.insert(ch.clone(), 1); + } + } + + /// Mark a peer as deprioritized + /// Implemented statically to appease the borrow checker. + fn mark_deprioritized( + deprioritized_peers: &mut HashMap, + peer: &NeighborAddress, + ) { + deprioritized_peers.insert( + peer.clone(), + get_epoch_time_secs() + PEER_DEPRIORITIZATION_TIME_SECS, + ); + } + + /// Mark a peer and its tenure as dead and failed + fn mark_failed_and_deprioritize_peer( + attempted_failed_tenures: &mut HashMap, + deprioritized_peers: &mut HashMap, + ch: &ConsensusHash, + peer: &NeighborAddress, + ) { + Self::mark_failure(attempted_failed_tenures, ch); + Self::mark_deprioritized(deprioritized_peers, peer); + } + /// Assign the given peer to the given downloader state machine. Allocate a slot for it if /// needed. 
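Replacing the old `HashSet<ConsensusHash>` with `HashSet<CompletedTenure>` above means a tenure only counts as "already downloaded" for a specific start/end block pair; if the inventory state later assigns the same consensus hash a different block range (as can happen near reward-cycle boundaries), it is scheduled again. A small sketch of that keying difference, with byte arrays standing in for the hash types:

use std::collections::HashSet;

#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct CompletedTenure {
    tenure_id: [u8; 20], // stand-in for ConsensusHash
    start_block: [u8; 32],
    end_block: [u8; 32],
}

fn main() {
    let mut completed: HashSet<CompletedTenure> = HashSet::new();
    completed.insert(CompletedTenure {
        tenure_id: [1; 20],
        start_block: [2; 32],
        end_block: [3; 32],
    });

    // Same tenure id, but a different end block: not a duplicate, so the
    // scheduler would (correctly) attempt this range as well.
    let rescoped = CompletedTenure {
        tenure_id: [1; 20],
        start_block: [2; 32],
        end_block: [4; 32],
    };
    assert!(!completed.contains(&rescoped));
}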
fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { debug!( - "Add downloader for tenure {} driven by {}", - &downloader.tenure_id_consensus_hash, &naddr + "Add downloader for tenure {} driven by {naddr}", + &downloader.tenure_id_consensus_hash ); if let Some(idx) = self.peers.get(&naddr) { self.downloaders[*idx] = Some(downloader); @@ -154,7 +226,7 @@ impl NakamotoTenureDownloaderSet { ) { for (naddr, downloader) in iter { if self.has_downloader(&naddr) { - debug!("Already have downloader for {}", &naddr); + debug!("Already have downloader for {naddr}"); continue; } self.add_downloader(naddr, downloader); @@ -180,15 +252,6 @@ impl NakamotoTenureDownloaderSet { cnt } - /// Determine whether or not there exists a downloader for the given tenure, identified by its - /// consensus hash. - pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { - self.downloaders - .iter() - .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) - .is_some() - } - /// Determine if this downloader set is empty -- i.e. there's no in-progress downloaders. pub fn is_empty(&self) -> bool { for downloader_opt in self.downloaders.iter() { @@ -218,8 +281,8 @@ impl NakamotoTenureDownloaderSet { }; debug!( - "Peer {} already bound to downloader for {}", - &naddr, &_downloader.tenure_id_consensus_hash + "Peer {naddr} already bound to downloader for {}", + &_downloader.tenure_id_consensus_hash ); return true; } @@ -231,8 +294,8 @@ impl NakamotoTenureDownloaderSet { continue; } debug!( - "Assign peer {} to work on downloader for {} in state {}", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + "Assign peer {naddr} to work on downloader for {} in state {}", + &downloader.tenure_id_consensus_hash, &downloader.state ); downloader.naddr = naddr.clone(); self.peers.insert(naddr, i); @@ -251,15 +314,15 @@ impl NakamotoTenureDownloaderSet { idled.push(naddr.clone()); continue; }; - let Some(downloader) = downloader_opt else { - debug!("Remove peer {} for null download {}", &naddr, i); + let Some(downloader) = downloader_opt.as_ref() else { + debug!("Remove peer {naddr} for null download {i}"); idled.push(naddr.clone()); continue; }; if downloader.idle { debug!( - "Remove idled peer {} for tenure download {}", - &naddr, &downloader.tenure_id_consensus_hash + "Remove idled peer {naddr} for tenure download {}", + &downloader.tenure_id_consensus_hash ); idled.push(naddr.clone()); } @@ -273,10 +336,12 @@ impl NakamotoTenureDownloaderSet { /// this up with a call to `clear_available_peers()`. 
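The "implemented statically to appease the borrow checker" comments on `mark_failure` and `mark_deprioritized` above refer to a common Rust pattern: a `&mut self` method cannot be called while another field of `self` is mutably borrowed, but an associated function that takes only the fields it needs can. A minimal illustration using only std:

use std::collections::HashMap;

struct DownloaderSet {
    failures: HashMap<String, u64>,
    downloaders: Vec<String>,
}

impl DownloaderSet {
    /// Takes the one field it mutates, not &mut self, so it can be called
    /// while `self.downloaders` is also mutably borrowed in the loop below.
    fn mark_failure(failures: &mut HashMap<String, u64>, key: &str) {
        *failures.entry(key.to_string()).or_insert(0) += 1;
    }

    fn run(&mut self) {
        for d in self.downloaders.iter_mut() {
            d.push('!'); // holds a mutable borrow of self.downloaders...
            // ...so a `self.mark_failure(d)` method call would not compile,
            // but borrowing the disjoint field directly is fine:
            Self::mark_failure(&mut self.failures, d);
        }
    }
}

fn main() {
    let mut s = DownloaderSet {
        failures: HashMap::new(),
        downloaders: vec!["peer1".into()],
    };
    s.run();
    assert_eq!(s.failures.get("peer1!"), Some(&1));
}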
pub fn clear_finished_downloaders(&mut self) { for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { + // clear the downloader if it's done by setting it to None + if downloader_opt + .as_ref() + .map(|dl| dl.is_done()) + .unwrap_or(false) + { *downloader_opt = None; } } @@ -306,8 +371,8 @@ impl NakamotoTenureDownloaderSet { }; if &downloader.tenure_id_consensus_hash == tenure_id { debug!( - "Have downloader for tenure {} already (idle={}, state={}, naddr={})", - tenure_id, downloader.idle, &downloader.state, &downloader.naddr + "Have downloader for tenure {tenure_id} already (idle={}, state={}, naddr={})", + downloader.idle, &downloader.state, &downloader.naddr ); return true; } @@ -337,32 +402,35 @@ impl NakamotoTenureDownloaderSet { self.clear_finished_downloaders(); self.clear_available_peers(); - while self.inflight() < count { + while self.num_scheduled_downloaders() < count { let Some(ch) = schedule.front() else { break; }; - if self.completed_tenures.contains(&ch) { - debug!("Already successfully downloaded tenure {}", &ch); - schedule.pop_front(); - continue; - } let Some(neighbors) = available.get_mut(ch) else { // not found on any neighbors, so stop trying this tenure - debug!("No neighbors have tenure {}", ch); + debug!("No neighbors have tenure {ch}"); schedule.pop_front(); continue; }; if neighbors.is_empty() { // no more neighbors to try - debug!("No more neighbors can serve tenure {}", ch); + debug!("No more neighbors can serve tenure {ch}"); schedule.pop_front(); continue; } let Some(naddr) = neighbors.pop() else { - debug!("No more neighbors can serve tenure {}", ch); + debug!("No more neighbors can serve tenure {ch}"); schedule.pop_front(); continue; }; + if get_epoch_time_secs() < *self.deprioritized_peers.get(&naddr).unwrap_or(&0) { + debug!( + "Peer {naddr} is deprioritized until {}", + self.deprioritized_peers.get(&naddr).unwrap_or(&0) + ); + continue; + } + if self.try_resume_peer(naddr.clone()) { continue; }; @@ -373,23 +441,40 @@ impl NakamotoTenureDownloaderSet { let Some(available_tenures) = tenure_block_ids.get(&naddr) else { // this peer doesn't have any known tenures, so try the others - debug!("No tenures available from {}", &naddr); + debug!("No tenures available from {naddr}"); continue; }; let Some(tenure_info) = available_tenures.get(ch) else { // this peer does not have a tenure start/end block for this tenure, so try the // others.
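The deprioritization check inserted into the scheduling loop above reduces to a retry-after map: a peer that recently failed maps to the epoch second at which it becomes usable again, and the loop skips it until then. A distilled sketch with a stand-in address type:

use std::collections::HashMap;
use std::time::{SystemTime, UNIX_EPOCH};

const PEER_DEPRIORITIZATION_TIME_SECS: u64 = 60;

fn get_epoch_time_secs() -> u64 {
    SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs()
}

type NeighborAddress = String; // stand-in

fn mark_deprioritized(deprioritized: &mut HashMap<NeighborAddress, u64>, peer: &NeighborAddress) {
    deprioritized.insert(
        peer.clone(),
        get_epoch_time_secs() + PEER_DEPRIORITIZATION_TIME_SECS,
    );
}

fn is_deprioritized(deprioritized: &HashMap<NeighborAddress, u64>, peer: &NeighborAddress) -> bool {
    // absent peers map to 0, i.e. "usable now"
    get_epoch_time_secs() < *deprioritized.get(peer).unwrap_or(&0)
}

fn main() {
    let mut deprioritized = HashMap::new();
    let peer: NeighborAddress = "1.2.3.4:20444".into();
    assert!(!is_deprioritized(&deprioritized, &peer)); // never failed: usable
    mark_deprioritized(&mut deprioritized, &peer);
    assert!(is_deprioritized(&deprioritized, &peer)); // skip for ~60s
}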
- debug!("Neighbor {} does not serve tenure {}", &naddr, ch); + debug!("Neighbor {naddr} does not serve tenure {ch}"); continue; }; + if tenure_info.processed { + // we already have this tenure + debug!("Already have processed tenure {ch}"); + self.completed_tenures + .remove(&CompletedTenure::from(tenure_info)); + continue; + } + if self + .completed_tenures + .contains(&CompletedTenure::from(tenure_info)) + { + debug!( + "Already successfully downloaded tenure {ch} ({}-{})", + &tenure_info.start_block_id, &tenure_info.end_block_id + ); + schedule.pop_front(); + continue; + } let Some(Some(start_reward_set)) = current_reward_cycles .get(&tenure_info.start_reward_cycle) .map(|cycle_info| cycle_info.reward_set()) else { debug!( - "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {:?}", + "Cannot fetch tenure-start block due to no known start reward set for cycle {}: {tenure_info:?}", tenure_info.start_reward_cycle, - &tenure_info ); schedule.pop_front(); continue; @@ -399,38 +484,45 @@ impl NakamotoTenureDownloaderSet { .map(|cycle_info| cycle_info.reward_set()) else { debug!( - "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {:?}", + "Cannot fetch tenure-end block due to no known end reward set for cycle {}: {tenure_info:?}", tenure_info.end_reward_cycle, - &tenure_info ); schedule.pop_front(); continue; }; - info!("Download tenure {}", &ch; + let attempt_count = *self.attempted_tenures.get(&ch).unwrap_or(&0); + self.attempted_tenures + .insert(ch.clone(), attempt_count.saturating_add(1)); + + let attempt_failed_count = *self.attempt_failed_tenures.get(&ch).unwrap_or(&0); + + info!("Download tenure {ch}"; + "peer" => %naddr, + "attempt" => attempt_count.saturating_add(1), + "failed" => attempt_failed_count, + "downloads_scheduled" => %self.num_scheduled_downloaders(), + "downloads_total" => %self.num_downloaders(), + "downloads_max_count" => count, + "downloads_inflight" => self.inflight(), "tenure_start_block" => %tenure_info.start_block_id, "tenure_end_block" => %tenure_info.end_block_id, "tenure_start_reward_cycle" => tenure_info.start_reward_cycle, - "tenure_end_reward_cycle" => tenure_info.end_reward_cycle); + "tenure_end_reward_cycle" => tenure_info.end_reward_cycle, + "tenure_burn_height" => tenure_info.tenure_id_burn_block_height); - debug!( - "Download tenure {} (start={}, end={}) (rc {},{})", - &ch, - &tenure_info.start_block_id, - &tenure_info.end_block_id, - tenure_info.start_reward_cycle, - tenure_info.end_reward_cycle - ); let tenure_download = NakamotoTenureDownloader::new( ch.clone(), + tenure_info.start_block_snapshot_consensus_hash.clone(), tenure_info.start_block_id.clone(), + tenure_info.end_block_snapshot_consensus_hash.clone(), tenure_info.end_block_id.clone(), naddr.clone(), start_reward_set.clone(), end_reward_set.clone(), ); - debug!("Request tenure {} from neighbor {}", ch, &naddr); + debug!("Request tenure {ch} from neighbor {naddr}"); self.add_downloader(naddr, tenure_download); schedule.pop_front(); } @@ -450,6 +542,7 @@ impl NakamotoTenureDownloaderSet { &mut self, network: &mut PeerNetwork, neighbor_rpc: &mut NeighborRPC, + chainstate: &mut StacksChainState, ) -> HashMap> { let addrs: Vec<_> = self.peers.keys().cloned().collect(); let mut finished = vec![]; @@ -459,28 +552,48 @@ impl NakamotoTenureDownloaderSet { // send requests for (naddr, index) in self.peers.iter() { if neighbor_rpc.has_inflight(&naddr) { - debug!("Peer {} has an inflight request", &naddr); + debug!("Peer {naddr} has an inflight 
request"); continue; } let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); + debug!("No downloader for {naddr}"); continue; }; if downloader.is_done() { debug!( - "Downloader for {} on tenure {} is finished", - &naddr, &downloader.tenure_id_consensus_hash + "Downloader for {naddr} on tenure {} is finished", + &downloader.tenure_id_consensus_hash ); finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + finished_tenures.push(CompletedTenure::from(downloader)); continue; } + + let _ = downloader + .try_advance_from_chainstate(chainstate) + .map_err(|e| { + warn!( + "Failed to advance downloader in state {} for {}: {:?}", + &downloader.state, &downloader.naddr, &e + ); + e + }); + debug!( - "Send request to {} for tenure {} (state {})", - &naddr, &downloader.tenure_id_consensus_hash, &downloader.state + "Send request to {naddr} for tenure {} (state {})", + &downloader.tenure_id_consensus_hash, &downloader.state ); let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { - debug!("Downloader for {} failed; this peer is dead", &naddr); + info!( + "Downloader for tenure {} to {naddr} failed; this peer is dead", + &downloader.tenure_id_consensus_hash, + ); + Self::mark_failed_and_deprioritize_peer( + &mut self.attempt_failed_tenures, + &mut self.deprioritized_peers, + &downloader.tenure_id_consensus_hash, + naddr, + ); neighbor_rpc.add_dead(network, naddr); continue; }; @@ -494,12 +607,12 @@ impl NakamotoTenureDownloaderSet { // clear dead, broken, and done for naddr in addrs.iter() { if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); + debug!("Remove dead/broken downloader for {naddr}"); self.clear_downloader(&naddr); } } for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); + debug!("Remove finished downloader for {done_naddr}"); self.clear_downloader(&done_naddr); } for done_tenure in finished_tenures.drain(..) 
{ @@ -509,23 +622,35 @@ impl NakamotoTenureDownloaderSet { // handle responses for (naddr, response) in neighbor_rpc.collect_replies(network) { let Some(index) = self.peers.get(&naddr) else { - debug!("No downloader for {}", &naddr); + debug!("No downloader for {naddr}"); continue; }; let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - debug!("No downloader for {}", &naddr); + debug!("No downloader for {naddr}"); continue; }; - debug!("Got response from {}", &naddr); + debug!("Got response from {naddr}"); let Ok(blocks_opt) = downloader .handle_next_download_response(response) .map_err(|e| { - debug!("Failed to handle response from {}: {:?}", &naddr, &e); + info!( + "Failed to handle response from {naddr} on tenure {}: {e}", + &downloader.tenure_id_consensus_hash, + ); e }) else { - debug!("Failed to handle download response from {}", &naddr); + debug!( + "Failed to handle download response from {naddr} on tenure {}", + &downloader.tenure_id_consensus_hash + ); + Self::mark_failed_and_deprioritize_peer( + &mut self.attempt_failed_tenures, + &mut self.deprioritized_peers, + &downloader.tenure_id_consensus_hash, + &naddr, + ); neighbor_rpc.add_dead(network, &naddr); continue; }; @@ -541,12 +666,16 @@ impl NakamotoTenureDownloaderSet { ); new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); if downloader.is_done() { + info!( + "Downloader for tenure {} is finished", + &downloader.tenure_id_consensus_hash + ); debug!( - "Downloader for {} on tenure {} is finished", - &naddr, &downloader.tenure_id_consensus_hash + "Downloader for tenure {} finished on {naddr}", + &downloader.tenure_id_consensus_hash, ); finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); + finished_tenures.push(CompletedTenure::from(downloader)); continue; } } @@ -554,12 +683,12 @@ impl NakamotoTenureDownloaderSet { // clear dead, broken, and done for naddr in addrs.iter() { if neighbor_rpc.is_dead_or_broken(network, naddr) { - debug!("Remove dead/broken downloader for {}", &naddr); + debug!("Remove dead/broken downloader for {naddr}"); self.clear_downloader(naddr); } } for done_naddr in finished.drain(..) { - debug!("Remove finished downloader for {}", &done_naddr); + debug!("Remove finished downloader for {done_naddr}"); self.clear_downloader(&done_naddr); } for done_tenure in finished_tenures.drain(..) { diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs index ddfd35fa97..9a9ee51b07 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs @@ -735,7 +735,9 @@ impl NakamotoUnconfirmedTenureDownloader { ); let ntd = NakamotoTenureDownloader::new( tenure_tip.parent_consensus_hash.clone(), + tenure_tip.consensus_hash.clone(), tenure_tip.parent_tenure_start_block_id.clone(), + tenure_tip.consensus_hash.clone(), tenure_tip.tenure_start_block_id.clone(), self.naddr.clone(), confirmed_signer_keys.clone(), @@ -777,6 +779,44 @@ impl NakamotoUnconfirmedTenureDownloader { } } + /// Advance the state of the downloader from chainstate, if possible. 
+ /// For example, a tenure-start block may have been pushed to us already (or it + /// may be a shadow block) + pub fn try_advance_from_chainstate( + &mut self, + chainstate: &StacksChainState, + ) -> Result<(), NetError> { + loop { + match self.state { + NakamotoUnconfirmedDownloadState::GetTenureInfo => { + // gotta send that request + break; + } + NakamotoUnconfirmedDownloadState::GetTenureStartBlock(start_block_id) => { + // if we have this, then load it up + let Some((tenure_start_block, _sz)) = chainstate + .nakamoto_blocks_db() + .get_nakamoto_block(&start_block_id)? + else { + break; + }; + self.try_accept_unconfirmed_tenure_start_block(tenure_start_block)?; + if let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) = &self.state { + break; + } + } + NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => { + // TODO: look at the chainstate and find out what we don't have to download + break; + } + NakamotoUnconfirmedDownloadState::Done => { + break; + } + } + } + Ok(()) + } + /// Begin the next download request for this state machine. /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The /// caller should try this again until it gets one of the other possible return values. It's diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index c58355a6a9..9b2dd1e106 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1197,8 +1197,9 @@ impl StacksHttp { let (response_preamble, response_contents) = match request_result { Ok((rp, rc)) => (rp, rc), Err(NetError::Http(e)) => { + debug!("RPC handler for {} failed: {:?}", decoded_path, &e); return StacksHttpResponse::new_error(&request_preamble, &*e.into_http_error()) - .try_into_contents() + .try_into_contents(); } Err(e) => { warn!("Irrecoverable error when handling request"; "path" => %request_preamble.path_and_query_str, "error" => %e); diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 3f4fcb6165..e832b70184 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -407,16 +407,13 @@ impl InvGenerator { let cur_sortition_info = self.get_sortition_info(sortdb, &cur_consensus_hash)?; let parent_sortition_consensus_hash = cur_sortition_info.parent_consensus_hash; - debug!("Get sortition and tenure info for height {}. cur_consensus_hash = {}, cur_tenure_info = {:?}, parent_sortition_consensus_hash = {}", cur_height, &cur_consensus_hash, &cur_tenure_opt, &parent_sortition_consensus_hash); + trace!("Get sortition and tenure info for height {cur_height}. cur_consensus_hash = {cur_consensus_hash}, cur_tenure_info = {cur_tenure_opt:?}, parent_sortition_consensus_hash = {parent_sortition_consensus_hash}"); if let Some(cur_tenure_info) = cur_tenure_opt.as_ref() { // a tenure was active when this sortition happened... 
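Both `try_advance_from_chainstate` implementations in this patch follow the same shape: loop over the download state machine, consuming blocks that are already in local chainstate, and break as soon as the current state genuinely requires network I/O or stops changing. A skeletal sketch of that loop, with the states reduced to an enum and the disk lookup stubbed out:

#[derive(Debug, PartialEq)]
enum State {
    GetTenureInfo,
    GetStartBlock,
    GetBlocks,
}

/// Stub for "is this block already on disk?"
fn block_on_disk(_state: &State) -> bool {
    true // pretend a pushed block is already stored
}

fn try_advance_from_chainstate(state: &mut State) {
    loop {
        match state {
            // must ask the network for the tenure tip; nothing local helps
            State::GetTenureInfo => break,
            State::GetStartBlock => {
                if !block_on_disk(state) {
                    break; // not local: fall back to a network request
                }
                *state = State::GetBlocks; // accepted from disk, keep going
            }
            // remaining work still needs network I/O in this sketch
            State::GetBlocks => break,
        }
    }
}

fn main() {
    let mut state = State::GetStartBlock;
    try_advance_from_chainstate(&mut state);
    assert_eq!(state, State::GetBlocks); // advanced without any I/O

    let mut stuck = State::GetTenureInfo;
    try_advance_from_chainstate(&mut stuck);
    assert_eq!(stuck, State::GetTenureInfo); // still needs the network
}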
if cur_tenure_info.tenure_id_consensus_hash == cur_consensus_hash { // ...and this tenure started in this sortition - debug!( - "Tenure was started for {} (height {})", - cur_consensus_hash, cur_height - ); + trace!("Tenure was started for {cur_consensus_hash} (height {cur_height})"); tenure_status.push(true); cur_tenure_opt = self.get_processed_tenure( chainstate, @@ -426,19 +423,13 @@ impl InvGenerator { )?; } else { // ...but this tenure did not start in this sortition - debug!( - "Tenure was NOT started for {} (bit {})", - cur_consensus_hash, cur_height - ); + trace!("Tenure was NOT started for {cur_consensus_hash} (bit {cur_height})"); tenure_status.push(false); } } else { // no active tenure during this sortition. Check the parent sortition to see if a // tenure begain there. - debug!( - "No winning sortition for {} (bit {})", - cur_consensus_hash, cur_height - ); + trace!("No winning sortition for {cur_consensus_hash} (bit {cur_height})"); tenure_status.push(false); cur_tenure_opt = self.get_processed_tenure( chainstate, @@ -457,9 +448,9 @@ impl InvGenerator { } tenure_status.reverse(); - debug!( - "Tenure bits off of {} and {}: {:?}", - nakamoto_tip, &tip.consensus_hash, &tenure_status + trace!( + "Tenure bits off of {nakamoto_tip} and {}: {tenure_status:?}", + &tip.consensus_hash ); Ok(tenure_status) } @@ -579,10 +570,10 @@ impl NakamotoTenureInv { /// Reset synchronization state for this peer. Don't remove inventory data; just make it so we /// can talk to the peer again - pub fn try_reset_comms(&mut self, inv_sync_interval: u64, start_rc: u64, cur_rc: u64) { + pub fn try_reset_comms(&mut self, inv_sync_interval: u64, start_rc: u64, max_rc: u64) { let now = get_epoch_time_secs(); if self.start_sync_time + inv_sync_interval <= now - && (self.cur_reward_cycle >= cur_rc || !self.online) + && (self.cur_reward_cycle >= max_rc || !self.online) { self.reset_comms(start_rc); } @@ -618,20 +609,20 @@ impl NakamotoTenureInv { pub fn getnakamotoinv_begin( &mut self, network: &mut PeerNetwork, - current_reward_cycle: u64, + max_reward_cycle: u64, ) -> bool { debug!( "{:?}: Begin Nakamoto inventory sync for {} in cycle {}", network.get_local_peer(), self.neighbor_address, - current_reward_cycle, + max_reward_cycle, ); // possibly reset communications with this peer, if it's time to do so. self.try_reset_comms( network.get_connection_opts().inv_sync_interval, - current_reward_cycle.saturating_sub(network.get_connection_opts().inv_reward_cycles), - current_reward_cycle, + max_reward_cycle.saturating_sub(network.get_connection_opts().inv_reward_cycles), + max_reward_cycle, ); if !self.is_online() { // don't talk to this peer for now @@ -643,7 +634,7 @@ impl NakamotoTenureInv { return false; } - if self.reward_cycle() > current_reward_cycle { + if self.reward_cycle() > max_reward_cycle { // we've fully sync'ed with this peer debug!( "{:?}: fully sync'ed: {}", @@ -669,10 +660,12 @@ impl NakamotoTenureInv { match reply.payload { StacksMessageType::NakamotoInv(inv_data) => { debug!( - "{:?}: got NakamotoInv: {:?}", + "{:?}: got NakamotoInv from {:?}: {:?}", network.get_local_peer(), + &self.neighbor_address, &inv_data ); + let ret = self.merge_tenure_inv(inv_data.tenures, self.reward_cycle()); self.next_reward_cycle(); return Ok(ret); @@ -908,10 +901,24 @@ impl NakamotoInvStateMachine { ) }); - // try to get all of the reward cycles we know about, plus the next one. 
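The deleted comment here explains why the inventory sync used to always probe one reward cycle ahead; the replacement code just below asks ahead only when the burnchain tip's reward cycle is actually past the processed one. A sketch of that cap, using a simplified height-to-cycle conversion (the real code goes through `PoxConstants::block_height_to_reward_cycle`):

/// Simplified stand-in: fixed-length cycles starting at first_height.
fn block_height_to_reward_cycle(first_height: u64, cycle_len: u64, height: u64) -> Option<u64> {
    height.checked_sub(first_height).map(|h| h / cycle_len)
}

/// Only probe the next cycle's inventory when the burnchain has actually
/// entered it; otherwise peers would be asked for a cycle nobody has yet.
fn max_reward_cycle(current_rc: u64, burnchain_tip_rc: u64) -> u64 {
    if burnchain_tip_rc > current_rc {
        current_rc.saturating_add(1)
    } else {
        current_rc
    }
}

fn main() {
    let tip_rc = block_height_to_reward_cycle(0, 2100, 12348).unwrap(); // cycle 5
    assert_eq!(max_reward_cycle(5, tip_rc), 5); // tip in same cycle: no lookahead
    assert_eq!(max_reward_cycle(4, tip_rc), 5); // tip ahead: sync one extra cycle
}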
We try to get - // the next one as well in case we're at a reward cycle boundary, but we're not at the - // chain tip -- the block downloader still needs that next inventory to proceed. - let proceed = inv.getnakamotoinv_begin(network, current_reward_cycle.saturating_add(1)); + let burnchain_tip_reward_cycle = sortdb + .pox_constants + .block_height_to_reward_cycle( + sortdb.first_block_height, + network.stacks_tip.burnchain_height, + ) + .ok_or(NetError::ChainstateError( + "block height comes before system start".into(), + ))?; + + let max_reward_cycle = if burnchain_tip_reward_cycle > current_reward_cycle { + // try to sync up to the next reward cycle + current_reward_cycle.saturating_add(1) + } else { + current_reward_cycle + }; + + let proceed = inv.getnakamotoinv_begin(network, max_reward_cycle); let inv_rc = inv.reward_cycle(); new_inventories.insert(naddr.clone(), inv); @@ -946,6 +953,7 @@ impl NakamotoInvStateMachine { "peer" => ?naddr, "error" => ?e ); + continue; } } diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 2210160bee..89e56fe29c 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -16,7 +16,6 @@ #[warn(unused_imports)] use std::collections::HashMap; -#[cfg(any(test, feature = "testing"))] use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::io::prelude::*; @@ -1466,7 +1465,7 @@ pub const DENY_BAN_DURATION: u64 = 86400; // seconds (1 day) pub const DENY_MIN_BAN_DURATION: u64 = 2; /// Result of doing network work -#[derive(Clone)] +#[derive(Clone, PartialEq, Debug)] pub struct NetworkResult { /// Stacks chain tip when we began this pass pub stacks_tip: StacksBlockId, @@ -1516,6 +1515,10 @@ pub struct NetworkResult { pub num_connected_peers: usize, /// The observed burnchain height pub burn_height: u64, + /// The observed stacks coinbase height + pub coinbase_height: u64, + /// The observed stacks tip height (different in Nakamoto from coinbase height) + pub stacks_tip_height: u64, /// The consensus hash of the stacks tip (prefixed `rc_` for historical reasons) pub rc_consensus_hash: ConsensusHash, /// The current StackerDB configs @@ -1530,6 +1533,8 @@ impl NetworkResult { num_download_passes: u64, num_connected_peers: usize, burn_height: u64, + coinbase_height: u64, + stacks_tip_height: u64, rc_consensus_hash: ConsensusHash, stacker_db_configs: HashMap, ) -> NetworkResult { @@ -1558,11 +1563,509 @@ impl NetworkResult { num_download_passes: num_download_passes, num_connected_peers, burn_height, + coinbase_height, + stacks_tip_height, rc_consensus_hash, stacker_db_configs, } } + /// Get the set of all StacksBlocks represented + fn all_block_ids(&self) -> HashSet { + let mut blocks: HashSet<_> = self + .blocks + .iter() + .map(|(ch, blk, _)| StacksBlockId::new(&ch, &blk.block_hash())) + .collect(); + + let pushed_blocks: HashSet<_> = self + .pushed_blocks + .iter() + .map(|(_, block_list)| { + block_list + .iter() + .map(|block_data| { + block_data + .blocks + .iter() + .map(|block_datum| { + StacksBlockId::new(&block_datum.0, &block_datum.1.block_hash()) + }) + .collect::>() + }) + .flatten() + }) + .flatten() + .collect(); + + let uploaded_blocks: HashSet<_> = self + .uploaded_blocks + .iter() + .map(|blk_data| { + blk_data + .blocks + .iter() + .map(|blk| StacksBlockId::new(&blk.0, &blk.1.block_hash())) + }) + .flatten() + .collect(); + + blocks.extend(pushed_blocks.into_iter()); + blocks.extend(uploaded_blocks.into_iter()); + blocks + } + + /// Get the set of all microblocks represented + fn 
all_microblock_hashes(&self) -> HashSet { + let mut mblocks: HashSet<_> = self + .confirmed_microblocks + .iter() + .map(|(_, mblocks, _)| mblocks.iter().map(|mblk| mblk.block_hash())) + .flatten() + .collect(); + + let pushed_microblocks: HashSet<_> = self + .pushed_microblocks + .iter() + .map(|(_, mblock_list)| { + mblock_list + .iter() + .map(|(_, mblock_data)| { + mblock_data + .microblocks + .iter() + .map(|mblock| mblock.block_hash()) + }) + .flatten() + }) + .flatten() + .collect(); + + let uploaded_microblocks: HashSet<_> = self + .uploaded_microblocks + .iter() + .map(|mblk_data| mblk_data.microblocks.iter().map(|mblk| mblk.block_hash())) + .flatten() + .collect(); + + mblocks.extend(pushed_microblocks.into_iter()); + mblocks.extend(uploaded_microblocks.into_iter()); + mblocks + } + + /// Get the set of all nakamoto blocks represented + fn all_nakamoto_block_ids(&self) -> HashSet { + let mut naka_block_ids: HashSet<_> = self + .nakamoto_blocks + .iter() + .map(|(_, nblk)| nblk.block_id()) + .collect(); + + let pushed_nakamoto_blocks: HashSet<_> = self + .pushed_nakamoto_blocks + .iter() + .map(|(_, naka_blocks_list)| { + naka_blocks_list + .iter() + .map(|(_, naka_blocks)| { + naka_blocks + .blocks + .iter() + .map(|nblk| nblk.block_id()) + .collect::>() + }) + .collect::>>() + }) + .collect::>>>() + .into_iter() + .flatten() + .into_iter() + .fold(HashSet::new(), |mut acc, next| { + acc.extend(next.into_iter()); + acc + }); + + let uploaded_nakamoto_blocks: HashSet<_> = self + .uploaded_nakamoto_blocks + .iter() + .map(|nblk| nblk.block_id()) + .collect(); + + naka_block_ids.extend(pushed_nakamoto_blocks.into_iter()); + naka_block_ids.extend(uploaded_nakamoto_blocks.into_iter()); + naka_block_ids + } + + /// Get the set of all txids represented + fn all_txids(&self) -> HashSet { + let mut txids: HashSet<_> = self + .uploaded_transactions + .iter() + .map(|tx| tx.txid()) + .collect(); + let pushed_txids: HashSet<_> = self + .pushed_transactions + .iter() + .map(|(_, tx_list)| { + tx_list + .iter() + .map(|(_, tx)| tx.txid()) + .collect::>() + }) + .collect::>>() + .into_iter() + .fold(HashSet::new(), |mut acc, next| { + acc.extend(next.into_iter()); + acc + }); + + let synced_txids: HashSet<_> = self + .synced_transactions + .iter() + .map(|tx| tx.txid()) + .collect(); + + txids.extend(pushed_txids.into_iter()); + txids.extend(synced_txids.into_iter()); + txids + } + + /// Get all unhandled message signatures. + /// This is unique per message. + fn all_msg_sigs(&self) -> HashSet { + self.unhandled_messages + .iter() + .map(|(_, msgs)| { + msgs.iter() + .map(|msg| msg.preamble.signature.clone()) + .collect::>() + }) + .into_iter() + .fold(HashSet::new(), |mut acc, next| { + acc.extend(next.into_iter()); + acc + }) + } + + /// Merge self into `newer`, and return `newer`. + /// deduplicate messages when possible. 
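// Illustrative sketch (not part of this change) of the retain-then-append
// pattern `update` applies to every collection below: collect the keys the
// `newer` result already holds, drop those from the older result, then move
// the survivors into `newer`. `Item` and its `key` are hypothetical stand-ins
// for blocks, microblocks, txids, and message signatures.
use std::collections::HashSet;

#[derive(Clone, PartialEq, Eq, Hash)]
struct Key(u64);

struct Item {
    key: Key,
}

fn merge_dedup(mut older: Vec<Item>, newer: &mut Vec<Item>) {
    let seen: HashSet<Key> = newer.iter().map(|item| item.key.clone()).collect();
    // keep only items `newer` has not already seen; duplicates are dropped here
    older.retain(|item| !seen.contains(&item.key));
    // `newer` wins on conflicts; the older pass only fills in what is missing
    newer.append(&mut older);
}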
+ pub fn update(mut self, mut newer: NetworkResult) -> Self { + // merge unhandled messages, but deduplicate + let newer_msgs = newer.all_msg_sigs(); + for (nk, mut msgs) in self.unhandled_messages.drain() { + msgs.retain(|msg| { + let retain = !newer_msgs.contains(&msg.preamble.signature); + if !retain { + debug!( + "Drop duplicate p2p message {} seq {}", + &msg.get_message_name(), + &msg.preamble.seq + ); + } + retain + }); + if let Some(newer_msgs) = newer.unhandled_messages.get_mut(&nk) { + newer_msgs.append(&mut msgs); + } else { + newer.unhandled_messages.insert(nk, msgs); + } + } + + let newer_blocks = newer.all_block_ids(); + let newer_microblocks = newer.all_microblock_hashes(); + let newer_naka_blocks = newer.all_nakamoto_block_ids(); + let newer_txids = newer.all_txids(); + + // only retain blocks not found in `newer` + self.blocks.retain(|(ch, blk, _)| { + let block_id = StacksBlockId::new(&ch, &blk.block_hash()); + let retain = !newer_blocks.contains(&block_id); + if !retain { + debug!("Drop duplicate downloaded block {}", &block_id); + } + retain + }); + newer.blocks.append(&mut self.blocks); + + // merge microblocks, but deduplicate + self.confirmed_microblocks + .retain_mut(|(_, ref mut mblocks, _)| { + mblocks.retain(|mblk| { + let retain = !newer_microblocks.contains(&mblk.block_hash()); + if !retain { + debug!( + "Drop duplicate downloaded microblock {}", + &mblk.block_hash() + ); + } + retain + }); + mblocks.len() > 0 + }); + newer + .confirmed_microblocks + .append(&mut self.confirmed_microblocks); + + // merge nakamoto blocks, but deduplicate + self.nakamoto_blocks.retain(|_, nblk| { + let retain = !newer_naka_blocks.contains(&nblk.block_id()); + if !retain { + debug!( + "Drop duplicate downloaded nakamoto block {}", + &nblk.block_id() + ); + } + retain + }); + newer.nakamoto_blocks.extend(self.nakamoto_blocks.drain()); + + // merge pushed transactions, but deduplicate + for (nk, mut tx_data) in self.pushed_transactions.drain() { + tx_data.retain(|(_, tx)| { + let retain = !newer_txids.contains(&tx.txid()); + if !retain { + debug!("Drop duplicate pushed transaction {}", &tx.txid()); + } + retain + }); + if tx_data.len() == 0 { + continue; + } + + if let Some(newer_tx_data) = newer.pushed_transactions.get_mut(&nk) { + newer_tx_data.append(&mut tx_data); + } else { + newer.pushed_transactions.insert(nk, tx_data); + } + } + + // merge pushed blocks, but deduplicate + for (nk, mut block_list) in self.pushed_blocks.drain() { + block_list.retain_mut(|ref mut block_data| { + block_data.blocks.retain(|blk_datum| { + let block_id = StacksBlockId::new(&blk_datum.0, &blk_datum.1.block_hash()); + let retain = !newer_blocks.contains(&block_id); + if !retain { + debug!("Drop duplicate pushed block {}", &block_id); + } + retain + }); + block_data.blocks.len() > 0 + }); + if block_list.len() == 0 { + continue; + } + + if let Some(newer_block_data) = newer.pushed_blocks.get_mut(&nk) { + newer_block_data.append(&mut block_list); + } else { + newer.pushed_blocks.insert(nk, block_list); + } + } + + // merge pushed microblocks, but deduplicate + for (nk, mut microblock_data) in self.pushed_microblocks.drain() { + microblock_data.retain_mut(|(_, ref mut mblock_data)| { + mblock_data.microblocks.retain(|mblk| { + let retain = !newer_microblocks.contains(&mblk.block_hash()); + if !retain { + debug!("Drop duplicate pushed microblock {}", &mblk.block_hash()); + } + retain + }); + mblock_data.microblocks.len() > 0 + }); + if microblock_data.len() == 0 { + continue; + } + + if let
Some(newer_microblock_data) = newer.pushed_microblocks.get_mut(&nk) { + newer_microblock_data.append(&mut microblock_data); + } else { + newer.pushed_microblocks.insert(nk, microblock_data); + } + } + + // merge pushed nakamoto blocks, but deduplicate + for (nk, mut nakamoto_block_data) in self.pushed_nakamoto_blocks.drain() { + nakamoto_block_data.retain_mut(|(_, ref mut naka_blocks)| { + naka_blocks.blocks.retain(|nblk| { + let retain = !newer_naka_blocks.contains(&nblk.block_id()); + if !retain { + debug!("Drop duplicate pushed nakamoto block {}", &nblk.block_id()); + } + retain + }); + naka_blocks.blocks.len() > 0 + }); + if nakamoto_block_data.len() == 0 { + continue; + } + + if let Some(newer_nakamoto_data) = newer.pushed_nakamoto_blocks.get_mut(&nk) { + newer_nakamoto_data.append(&mut nakamoto_block_data); + } else { + newer.pushed_nakamoto_blocks.insert(nk, nakamoto_block_data); + } + } + + // merge uploaded data, but deduplicate + self.uploaded_transactions.retain(|tx| { + let retain = !newer_txids.contains(&tx.txid()); + if !retain { + debug!("Drop duplicate uploaded transaction {}", &tx.txid()); + } + retain + }); + self.uploaded_blocks.retain_mut(|ref mut blk_data| { + blk_data.blocks.retain(|blk| { + let block_id = StacksBlockId::new(&blk.0, &blk.1.block_hash()); + let retain = !newer_blocks.contains(&block_id); + if !retain { + debug!("Drop duplicate uploaded block {}", &block_id); + } + retain + }); + + blk_data.blocks.len() > 0 + }); + self.uploaded_microblocks.retain_mut(|ref mut mblock_data| { + mblock_data.microblocks.retain(|mblk| { + let retain = !newer_microblocks.contains(&mblk.block_hash()); + if !retain { + debug!("Drop duplicate uploaded microblock {}", &mblk.block_hash()); + } + retain + }); + + mblock_data.microblocks.len() > 0 + }); + self.uploaded_nakamoto_blocks.retain(|nblk| { + let retain = !newer_naka_blocks.contains(&nblk.block_id()); + if !retain { + debug!( + "Drop duplicate uploaded nakamoto block {}", + &nblk.block_id() + ); + } + retain + }); + + newer + .uploaded_transactions + .append(&mut self.uploaded_transactions); + newer.uploaded_blocks.append(&mut self.uploaded_blocks); + newer + .uploaded_microblocks + .append(&mut self.uploaded_microblocks); + newer + .uploaded_nakamoto_blocks + .append(&mut self.uploaded_nakamoto_blocks); + + // merge uploaded/pushed stackerdb, but drop stale versions + let newer_stackerdb_chunk_versions: HashMap<_, _> = newer + .uploaded_stackerdb_chunks + .iter() + .chain(newer.pushed_stackerdb_chunks.iter()) + .map(|chunk| { + ( + ( + chunk.contract_id.clone(), + chunk.rc_consensus_hash.clone(), + chunk.chunk_data.slot_id, + ), + chunk.chunk_data.slot_version, + ) + }) + .collect(); + + self.uploaded_stackerdb_chunks.retain(|push_chunk| { + if push_chunk.rc_consensus_hash != newer.rc_consensus_hash { + debug!( + "Drop uploaded StackerDB chunk for {} due to stale view ({} != {}): {:?}", + &push_chunk.contract_id, + &push_chunk.rc_consensus_hash, + &newer.rc_consensus_hash, + &push_chunk.chunk_data + ); + return false; + } + if let Some(version) = newer_stackerdb_chunk_versions.get(&( + push_chunk.contract_id.clone(), + push_chunk.rc_consensus_hash.clone(), + push_chunk.chunk_data.slot_id, + )) { + let retain = push_chunk.chunk_data.slot_version > *version; + if !retain { + debug!( + "Drop uploaded StackerDB chunk for {} due to stale version: {:?}", + &push_chunk.contract_id, &push_chunk.chunk_data + ); + } + retain + } else { + true + } + }); + + self.pushed_stackerdb_chunks.retain(|push_chunk| { + if
push_chunk.rc_consensus_hash != newer.rc_consensus_hash { + debug!( + "Drop pushed StackerDB chunk for {} due to stale view ({} != {}): {:?}", + &push_chunk.contract_id, + &push_chunk.rc_consensus_hash, + &newer.rc_consensus_hash, + &push_chunk.chunk_data + ); + return false; + } + if let Some(version) = newer_stackerdb_chunk_versions.get(&( + push_chunk.contract_id.clone(), + push_chunk.rc_consensus_hash.clone(), + push_chunk.chunk_data.slot_id, + )) { + let retain = push_chunk.chunk_data.slot_version > *version; + if !retain { + debug!( + "Drop pushed StackerDB chunk for {} due to stale version: {:?}", + &push_chunk.contract_id, &push_chunk.chunk_data + ); + } + retain + } else { + true + } + }); + + newer + .uploaded_stackerdb_chunks + .append(&mut self.uploaded_stackerdb_chunks); + newer + .pushed_stackerdb_chunks + .append(&mut self.pushed_stackerdb_chunks); + + // dedup sync'ed transactions + self.synced_transactions.retain(|tx| { + let retain = !newer_txids.contains(&tx.txid()); + if !retain { + debug!("Drop duplicate sync'ed transaction {}", &tx.txid()); + } + retain + }); + + newer + .synced_transactions + .append(&mut self.synced_transactions); + + // no dedup here, but do merge + newer + .stacker_db_sync_results + .append(&mut self.stacker_db_sync_results); + newer.attachments.append(&mut self.attachments); + + newer + } + pub fn has_blocks(&self) -> bool { self.blocks.len() > 0 || self.pushed_blocks.len() > 0 } @@ -1616,6 +2119,10 @@ impl NetworkResult { || self.has_stackerdb_chunks() } + pub fn has_block_data_to_store(&self) -> bool { + self.has_blocks() || self.has_microblocks() || self.has_nakamoto_blocks() + } + pub fn consume_unsolicited(&mut self, unhandled_messages: PendingMessages) { for ((_event_id, neighbor_key), messages) in unhandled_messages.into_iter() { for message in messages.into_iter() { @@ -1734,6 +2241,7 @@ pub mod test { use clarity::boot_util::boot_code_id; use clarity::types::sqlite::NO_PARAMS; + use clarity::vm::ast::parser::v1::CONTRACT_MAX_NAME_LENGTH; use clarity::vm::ast::ASTRules; use clarity::vm::costs::ExecutionCost; use clarity::vm::database::STXBalance; @@ -1782,7 +2290,7 @@ pub mod test { use crate::chainstate::stacks::{StacksMicroblockHeader, *}; use crate::chainstate::*; use crate::clarity::vm::clarity::TransactionConnection; - use crate::core::{StacksEpoch, StacksEpochExtension, NETWORK_P2P_PORT}; + use crate::core::{EpochList, StacksEpoch, StacksEpochExtension, NETWORK_P2P_PORT}; use crate::net::asn::*; use crate::net::atlas::*; use crate::net::chat::*; @@ -2087,7 +2595,7 @@ pub mod test { pub initial_lockups: Vec, pub spending_account: TestMiner, pub setup_code: String, - pub epochs: Option<Vec<StacksEpoch>>, + pub epochs: Option<EpochList>, /// If some(), TestPeer should check the PoX-2 invariants /// on cycle numbers bounded (inclusive) by the supplied u64s pub check_pox_invariants: Option<(u64, u64)>, @@ -2486,7 +2994,17 @@ pub mod test { let smart_contract = TransactionPayload::SmartContract( TransactionSmartContract { name: ContractName::try_from( - conf.test_name.replace("::", "-").to_string(), + conf.test_name + .replace("::", "-") + .chars() + .skip( + conf.test_name + .len() + .saturating_sub(CONTRACT_MAX_NAME_LENGTH), + ) + .collect::<String>() + .trim_start_matches(|c: char| !c.is_alphabetic()) + .to_string(), ) .expect("FATAL: invalid boot-code contract name"), code_body: StacksString::from_str(&conf.setup_code) @@ -2633,10 +3151,13 @@ pub mod test { let stackerdb_contracts: Vec<_> = stacker_db_syncs.keys().map(|cid| cid.clone()).collect(); + let burnchain_db
= config.burnchain.open_burnchain_db(false).unwrap(); + let mut peer_network = PeerNetwork::new( peerdb, atlasdb, p2p_stacker_dbs, + burnchain_db, local_peer, config.peer_version, config.burnchain.clone(), @@ -2914,8 +3435,6 @@ pub mod test { let mut stacks_node = self.stacks_node.take().unwrap(); let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir); - let old_tip = self.network.stacks_tip.clone(); - self.network .refresh_burnchain_view(&indexer, &sortdb, &mut stacks_node.chainstate, false) .unwrap(); @@ -2924,6 +3443,28 @@ pub mod test { self.stacks_node = Some(stacks_node); } + pub fn refresh_reward_cycles(&mut self) { + let sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.stacks_node.take().unwrap(); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let tip_block_id = self.network.stacks_tip.block_id(); + let tip_height = self.network.stacks_tip.height; + + self.network + .refresh_reward_cycles( + &sortdb, + &mut stacks_node.chainstate, + &tip, + &tip_block_id, + tip_height, + ) + .unwrap(); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(stacks_node); + } + pub fn for_each_convo_p2p(&mut self, mut f: F) -> Vec> where F: FnMut(usize, &mut ConversationP2P) -> Result, @@ -3520,6 +4061,22 @@ pub mod test { self.sortdb.as_ref().unwrap() } + pub fn with_dbs(&mut self, f: F) -> R + where + F: FnOnce(&mut TestPeer, &mut SortitionDB, &mut TestStacksNode, &mut MemPoolDB) -> R, + { + let mut sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.stacks_node.take().unwrap(); + let mut mempool = self.mempool.take().unwrap(); + + let res = f(self, &mut sortdb, &mut stacks_node, &mut mempool); + + self.stacks_node = Some(stacks_node); + self.sortdb = Some(sortdb); + self.mempool = Some(mempool); + res + } + pub fn with_db_state(&mut self, f: F) -> Result where F: FnOnce( @@ -4185,6 +4742,9 @@ pub mod test { all_blocks: Vec, expected_siblings: usize, ) { + if !self.mine_malleablized_blocks { + return; + } for block in all_blocks.iter() { let sighash = block.header.signer_signature_hash(); let siblings = self diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 054fefaf1d..71ca82f8bf 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -50,7 +50,7 @@ use crate::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use crate::chainstate::stacks::boot::{RewardSet, MINERS_NAME}; use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState}; use crate::chainstate::stacks::{StacksBlockHeader, MAX_BLOCK_LEN, MAX_TRANSACTION_LEN}; -use crate::core::StacksEpoch; +use crate::core::{EpochList, StacksEpoch}; use crate::monitoring::{update_inbound_neighbors, update_outbound_neighbors}; use crate::net::asn::ASEntry4; use crate::net::atlas::{AtlasDB, AttachmentInstance, AttachmentsDownloader}; @@ -243,11 +243,18 @@ impl CurrentRewardSet { /// Cached stacks chain tip info, consumed by RPC endpoints #[derive(Clone, Debug, PartialEq)] pub struct StacksTipInfo { + /// consensus hash of the highest processed stacks block pub consensus_hash: ConsensusHash, + /// block hash of the highest processed stacks block pub block_hash: BlockHeaderHash, + /// height of the highest processed stacks block pub height: u64, + /// coinbase height of the highest processed tenure pub coinbase_height: u64, + /// whether or not the system has transitioned to Nakamoto pub is_nakamoto: bool, + /// highest burnchain block discovered + pub burnchain_height: u64, } impl StacksTipInfo { @@ 
-258,6 +265,7 @@ impl StacksTipInfo { height: 0, coinbase_height: 0, is_nakamoto: false, + burnchain_height: 0, } } @@ -269,7 +277,7 @@ impl StacksTipInfo { pub struct PeerNetwork { // constants pub peer_version: u32, - pub epochs: Vec, + pub epochs: EpochList, // refreshed when peer key expires pub local_peer: LocalPeer, @@ -306,6 +314,9 @@ pub struct PeerNetwork { pub peerdb: PeerDB, pub atlasdb: AtlasDB, + // handle to burnchain DB + pub burnchain_db: BurnchainDB, + // ongoing p2p conversations (either they reached out to us, or we to them) pub peers: PeerMap, pub sockets: HashMap, @@ -444,6 +455,7 @@ impl PeerNetwork { peerdb: PeerDB, atlasdb: AtlasDB, stackerdbs: StackerDBs, + burnchain_db: BurnchainDB, mut local_peer: LocalPeer, peer_version: u32, burnchain: Burnchain, @@ -453,7 +465,7 @@ impl PeerNetwork { QualifiedContractIdentifier, (StackerDBConfig, StackerDBSync), >, - epochs: Vec, + epochs: EpochList, ) -> PeerNetwork { let http = HttpPeer::new( connection_opts.clone(), @@ -509,6 +521,8 @@ impl PeerNetwork { peerdb, atlasdb, + burnchain_db, + peers: PeerMap::new(), sockets: HashMap::new(), events: HashMap::new(), @@ -625,26 +639,14 @@ impl PeerNetwork { /// Get an epoch at a burn block height pub fn get_epoch_at_burn_height(&self, burn_height: u64) -> StacksEpoch { - let epoch_index = StacksEpoch::find_epoch(&self.epochs, burn_height) - .unwrap_or_else(|| panic!("BUG: block {} is not in a known epoch", burn_height,)); - let epoch = self - .epochs - .get(epoch_index) - .expect("BUG: no epoch at found index") - .clone(); - epoch + self.epochs + .epoch_at_height(burn_height) + .unwrap_or_else(|| panic!("BUG: block {} is not in a known epoch", burn_height)) } /// Get an epoch by epoch ID pub fn get_epoch_by_epoch_id(&self, epoch_id: StacksEpochId) -> StacksEpoch { - let epoch_index = StacksEpoch::find_epoch_by_id(&self.epochs, epoch_id) - .unwrap_or_else(|| panic!("BUG: epoch {} is not in a known epoch", epoch_id,)); - let epoch = self - .epochs - .get(epoch_index) - .expect("BUG: no epoch at found index") - .clone(); - epoch + self.epochs[epoch_id].clone() } /// Do something with the HTTP peer. @@ -1808,7 +1810,11 @@ impl PeerNetwork { }; match self.can_register_peer(&neighbor_key, outbound) { - Ok(_) => {} + Ok(_) => { + info!("Neighbor accepted!"; + "public key" => ?pubkey_opt, + "address" => %neighbor_key.addrbytes); + } Err(e) => { debug!( "{:?}: Could not register peer {:?}: {:?}", @@ -1903,6 +1909,11 @@ impl PeerNetwork { for (nk, pubkh) in nk_remove.into_iter() { // remove event state self.events.remove(&nk); + info!("Dropping neighbor!"; + "event id" => %event_id, + "public address" => %pubkh, + "public key" => %nk.addrbytes + ); // remove inventory state if let Some(inv_state) = self.inv_state.as_mut() { @@ -4257,6 +4268,7 @@ impl PeerNetwork { .anchored_header .as_stacks_nakamoto() .is_some(), + burnchain_height: self.stacks_tip.burnchain_height, }; debug!( "{:?}: Parent Stacks tip off of {} is {:?}", @@ -4280,18 +4292,88 @@ impl PeerNetwork { } } + /// Determine if we need to invalidate a given cached reward set. + /// + /// In Epoch 2, this requires checking the first sortition in the start of the reward set's + /// reward phase. + /// + /// In Nakamoto, this requires checking the anchor block in the prepare phase for the upcoming + /// reward phase. 
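// Illustrative sketch (not part of this change) of the two invalidation
// regimes the function below implements. `reorged` and the anchor-block
// comparison are hypothetical stand-ins for the sortition/chainstate queries
// it actually performs.
struct CachedRewardCycle {
    anchor_block: [u8; 32],
}

fn must_reload(
    epoch_30_or_later: bool,
    reorged: bool,                      // burnchain OR Stacks reorg observed
    cached: Option<&CachedRewardCycle>, // what we have in the cache, if anything
    current_anchor: Option<[u8; 32]>,   // anchor block the chain reports now
) -> bool {
    if epoch_30_or_later {
        // Nakamoto: the cache only goes stale if the chain itself reorged
        return reorged;
    }
    // Epoch 2.x: reload unless the cached anchor block still matches
    match (cached, current_anchor) {
        (Some(cached), Some(anchor)) => cached.anchor_block != anchor,
        _ => true,
    }
}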
+ fn check_reload_cached_reward_set( + &self, + sortdb: &SortitionDB, + chainstate: &StacksChainState, + rc: u64, + tip_sn: &BlockSnapshot, + tip_block_id: &StacksBlockId, + tip_height: u64, + ) -> Result<bool, net_error> { + let epoch = self.get_epoch_at_burn_height(tip_sn.block_height); + if epoch.epoch_id >= StacksEpochId::Epoch30 { + // epoch 3, where there are no forks except from bugs or burnchain reorgs. + // invalidate reward cycles on burnchain or stacks reorg, should they ever happen + let reorg = Self::is_reorg(Some(&self.burnchain_tip), tip_sn, sortdb) + || Self::is_nakamoto_reorg( + &self.stacks_tip.block_id(), + self.stacks_tip.height, + tip_block_id, + tip_height, + chainstate, + ); + return Ok(reorg); + } else { + // epoch 2 + // NOTE: + 1 needed because the sortition db indexes anchor blocks at index height 1, + // not 0 + let ih = sortdb.index_handle(&tip_sn.sortition_id); + let rc_start_height = self.burnchain.nakamoto_first_block_of_cycle(rc) + 1; + let Some(ancestor_sort_id) = + get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? + else { + // reward cycle is too far back for there to be an ancestor, so no need to + // reload + test_debug!( + "No ancestor sortition ID off of {} (height {}) at {rc_start_height}", + &tip_sn.sortition_id, + tip_sn.block_height + ); + return Ok(false); + }; + let ancestor_ih = sortdb.index_handle(&ancestor_sort_id); + let anchor_hash_opt = ancestor_ih.get_last_anchor_block_hash()?; + + if let Some(cached_rc_info) = self.current_reward_sets.get(&rc) { + if let Some(anchor_hash) = anchor_hash_opt.as_ref() { + // careful -- the sortition DB stores a StacksBlockId's value (the tenure-start + // StacksBlockId) as a BlockHeaderHash, since that's what it was designed to + // deal with in the pre-Nakamoto days + if cached_rc_info.anchor_block_id() == StacksBlockId(anchor_hash.0.clone()) + || cached_rc_info.anchor_block_hash == *anchor_hash + { + // cached reward set data is still valid + test_debug!("Cached reward cycle {rc} is still valid"); + return Ok(false); + } + } + } + } + + Ok(true) + } + /// Refresh our view of the last three reward cycles /// This ensures that the PeerNetwork has cached copies of the reward cycle data (including the /// signing set) for the current, previous, and previous-previous reward cycles. This data is /// in turn consumed by the Nakamoto block downloader, which must validate blocks signed from /// any of these reward cycles. #[cfg_attr(test, mutants::skip)] - fn refresh_reward_cycles( + pub fn refresh_reward_cycles( &mut self, sortdb: &SortitionDB, chainstate: &mut StacksChainState, tip_sn: &BlockSnapshot, tip_block_id: &StacksBlockId, + tip_height: u64, ) -> Result<(), net_error> { let cur_rc = self .burnchain @@ -4300,35 +4382,22 @@ impl PeerNetwork { let prev_rc = cur_rc.saturating_sub(1); let prev_prev_rc = prev_rc.saturating_sub(1); - let ih = sortdb.index_handle(&tip_sn.sortition_id); for rc in [cur_rc, prev_rc, prev_prev_rc] { debug!("Refresh reward cycle info for cycle {}", rc); - let rc_start_height = self.burnchain.nakamoto_first_block_of_cycle(rc); - let Some(ancestor_sort_id) = - get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? - else { - // reward cycle is too far back for there to be an ancestor + if self.current_reward_sets.contains_key(&rc) + && !self.check_reload_cached_reward_set( + sortdb, + chainstate, + rc, + tip_sn, + tip_block_id, + tip_height, + )?
+ { continue; - }; - let ancestor_ih = sortdb.index_handle(&ancestor_sort_id); - let anchor_hash_opt = ancestor_ih.get_last_anchor_block_hash()?; - - if let Some(cached_rc_info) = self.current_reward_sets.get(&rc) { - if let Some(anchor_hash) = anchor_hash_opt.as_ref() { - // careful -- the sortition DB stores a StacksBlockId's value (the tenure-start - // StacksBlockId) as a BlockHeaderHash, since that's what it was designed to - // deal with in the pre-Nakamoto days - if cached_rc_info.anchor_block_id() == StacksBlockId(anchor_hash.0.clone()) - || cached_rc_info.anchor_block_hash == *anchor_hash - { - // cached reward set data is still valid - continue; - } - } } - - debug!("Load reward cycle info for cycle {}", rc); + debug!("Refresh reward cycle info for cycle {rc}"); let Some((reward_set_info, anchor_block_header)) = load_nakamoto_reward_set( rc, &tip_sn.sortition_id, @@ -4385,6 +4454,7 @@ impl PeerNetwork { let (stacks_tip_ch, stacks_tip_bhh, stacks_tip_height) = SortitionDB::get_canonical_stacks_chain_tip_hash_and_height(sortdb.conn())?; + let new_burnchain_tip = self.burnchain_db.get_canonical_chain_tip()?; let burnchain_tip_changed = canonical_sn.block_height != self.chain_view.burn_block_height || self.num_state_machine_passes == 0 || canonical_sn.sortition_id != self.burnchain_tip.sortition_id; @@ -4434,6 +4504,7 @@ impl PeerNetwork { chainstate, &canonical_sn, &new_stacks_tip_block_id, + stacks_tip_height, )?; } @@ -4463,6 +4534,7 @@ impl PeerNetwork { height: 0, coinbase_height: 0, is_nakamoto: false, + burnchain_height: 0, } } Err(e) => return Err(e), @@ -4534,12 +4606,10 @@ impl PeerNetwork { if self.get_current_epoch().epoch_id < StacksEpochId::Epoch30 { // update heaviest affirmation map view - let burnchain_db = self.burnchain.open_burnchain_db(false)?; - self.heaviest_affirmation_map = static_get_heaviest_affirmation_map( &self.burnchain, indexer, - &burnchain_db, + &self.burnchain_db, sortdb, &canonical_sn.sortition_id, ) @@ -4550,7 +4620,7 @@ impl PeerNetwork { self.tentative_best_affirmation_map = static_get_canonical_affirmation_map( &self.burnchain, indexer, - &burnchain_db, + &self.burnchain_db, sortdb, chainstate, &canonical_sn.sortition_id, @@ -4591,9 +4661,8 @@ impl PeerNetwork { if stacks_tip_changed && self.get_current_epoch().epoch_id < StacksEpochId::Epoch30 { // update stacks tip affirmation map view // (NOTE: this check has to happen _after_ self.chain_view gets updated!) 
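// Illustrative sketch (not part of this change) of the refactor pattern in the
// surrounding hunks: `PeerNetwork` now carries `burnchain_db` as a field, so
// the affirmation-map queries below reuse one long-lived handle instead of
// calling `self.burnchain.open_burnchain_db(false)?` on every refresh pass.
// `Handle` and `Net` are hypothetical.
struct Handle;
impl Handle {
    fn open() -> Handle {
        Handle
    }
    fn query(&self) {}
}

struct Net {
    burnchain_db: Handle, // opened once, at construction
}

impl Net {
    fn refresh(&self) {
        // before: let db = Handle::open(); db.query();  -- reopened per pass
        self.burnchain_db.query(); // after: reuse the cached handle
    }
}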
- let burnchain_db = self.burnchain.open_burnchain_db(false)?; self.stacks_tip_affirmation_map = static_get_stacks_tip_affirmation_map( - &burnchain_db, + &self.burnchain_db, sortdb, &canonical_sn.sortition_id, &canonical_sn.canonical_stacks_tip_consensus_hash, @@ -4633,7 +4702,7 @@ impl PeerNetwork { debug!( "{:?}: handle unsolicited stacks messages: tenure changed {} != {}, {} buffered", self.get_local_peer(), - &self.burnchain_tip.consensus_hash, + &self.stacks_tip.consensus_hash, &canonical_sn.consensus_hash, self.pending_stacks_messages .iter() @@ -4659,8 +4728,10 @@ impl PeerNetwork { height: stacks_tip_height, coinbase_height, is_nakamoto: stacks_tip_is_nakamoto, + burnchain_height: new_burnchain_tip.block_height, }; self.parent_stacks_tip = parent_stacks_tip; + self.parent_stacks_tip.burnchain_height = new_burnchain_tip.block_height; debug!( "{:?}: canonical Stacks tip is now {:?}", @@ -4733,7 +4804,6 @@ impl PeerNetwork { ibd, true, ); - let unhandled_messages = self.handle_unsolicited_stacks_messages(chainstate, unhandled_messages, true); @@ -4980,7 +5050,7 @@ impl PeerNetwork { Ok(()) } - /// Static helper to check to see if there has been a reorg + /// Static helper to check to see if there has been a burnchain reorg pub fn is_reorg( last_sort_tip: Option<&BlockSnapshot>, sort_tip: &BlockSnapshot, @@ -5003,15 +5073,15 @@ impl PeerNetwork { { // current and previous sortition tips are at the same height, but represent different // blocks. - debug!( - "Reorg detected at burn height {}: {} != {}", + info!( + "Burnchain reorg detected at burn height {}: {} != {}", sort_tip.block_height, &last_sort_tip.consensus_hash, &sort_tip.consensus_hash ); return true; } // It will never be the case that the last and current tip have different heights, but the - // smae consensus hash. If they have the same height, then we would have already returned + // same consensus hash. If they have the same height, then we would have already returned // since we've handled both the == and != cases for their consensus hashes. So if we reach // this point, the heights and consensus hashes are not equal. We only need to check that // last_sort_tip is an ancestor of sort_tip @@ -5043,6 +5113,60 @@ impl PeerNetwork { false } + /// Static helper to check to see if there has been a Nakamoto reorg. + /// Return true if there's a Nakamoto reorg + /// Return false otherwise. + pub fn is_nakamoto_reorg( + last_stacks_tip: &StacksBlockId, + last_stacks_tip_height: u64, + stacks_tip: &StacksBlockId, + stacks_tip_height: u64, + chainstate: &StacksChainState, + ) -> bool { + if last_stacks_tip == stacks_tip { + // same tip + return false; + } + + if last_stacks_tip_height == stacks_tip_height && last_stacks_tip != stacks_tip { + // last block is a sibling + info!( + "Stacks reorg detected at stacks height {last_stacks_tip_height}: {last_stacks_tip} != {stacks_tip}", + ); + return true; + } + + if stacks_tip_height < last_stacks_tip_height { + info!( + "Stacks reorg (chain shrink) detected at stacks height {last_stacks_tip_height}: {last_stacks_tip} != {stacks_tip}", + ); + return true; + } + + // It will never be the case that the last and current tip have different heights, but the + // same block ID. If they have the same height, then we would have already returned + // since we've handled both the == and != cases for their block IDs. So if we reach + // this point, the heights and block IDs are not equal. 
We only need to check that + last_stacks_tip is an ancestor of stacks_tip + + let mut cursor = stacks_tip.clone(); + for _ in last_stacks_tip_height..stacks_tip_height { + let Ok(Some(parent_id)) = + NakamotoChainState::get_nakamoto_parent_block_id(chainstate.db(), &cursor) + else { + error!("Failed to load parent id of {cursor}"); + return true; + }; + cursor = parent_id; + } + + debug!("is_nakamoto_reorg check"; + "parent_id" => %cursor, + "last_stacks_tip" => %last_stacks_tip); + + cursor != *last_stacks_tip + } + /// Log our neighbors. /// Used for testing and debugging fn log_neighbors(&mut self) { @@ -5125,6 +5249,10 @@ impl PeerNetwork { } }; + test_debug!( + "unsolicited_buffered_messages = {:?}", + &unsolicited_buffered_messages + ); let mut network_result = NetworkResult::new( self.stacks_tip.block_id(), self.num_state_machine_passes, @@ -5132,6 +5260,8 @@ impl PeerNetwork { self.num_downloader_passes, self.peers.len(), self.chain_view.burn_block_height, + self.stacks_tip.coinbase_height, + self.stacks_tip.height, self.chain_view.rc_consensus_hash.clone(), self.get_stacker_db_configs_owned(), ); @@ -5266,7 +5396,7 @@ mod test { network_id: 0x9abcdef0, chain_name: "bitcoin".to_string(), network_name: "testnet".to_string(), - working_dir: "/nope".to_string(), + working_dir: ":memory:".to_string(), consensus_hash_lifetime: 24, stable_confirmations: 7, initial_reward_start_block: 50, @@ -5297,12 +5427,14 @@ mod test { let atlas_config = AtlasConfig::new(false); let atlasdb = AtlasDB::connect_memory(atlas_config).unwrap(); let stacker_db = StackerDBs::connect_memory(); + let burnchain_db = BurnchainDB::connect(":memory:", &burnchain, true).unwrap(); let local_peer = PeerDB::get_local_peer(db.conn()).unwrap(); let p2p = PeerNetwork::new( db, atlasdb, stacker_db, + burnchain_db, local_peer, 0x12345678, burnchain, diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 575e96138e..cb7d310321 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -933,6 +933,11 @@ impl Relayer { &obtained_method; "block_id" => %block.header.block_id(), ); + if block.is_shadow_block() { + // drop, since we can get these from ourselves when downloading a tenure that ends in + // a shadow block. + return Ok(BlockAcceptResponse::AlreadyStored); + } if fault_injection::ignore_block(block.header.chain_length, &burnchain.working_dir) { return Ok(BlockAcceptResponse::Rejected( @@ -1703,6 +1708,7 @@ impl Relayer { sortdb: &mut SortitionDB, chainstate: &mut StacksChainState, coord_comms: Option<&CoordinatorChannels>, + reject_blocks_pushed: bool, ) -> Result<(Vec, Vec), net_error> { let mut pushed_blocks = vec![]; let mut bad_neighbors = vec![]; @@ -1731,6 +1737,14 @@ impl Relayer { for nakamoto_block in nakamoto_blocks_data.blocks.drain(..) { let block_id = nakamoto_block.block_id(); + if reject_blocks_pushed { + debug!( + "Received pushed Nakamoto block {} from {}, but configured to reject it.", + block_id, neighbor_key + ); + continue; + } + debug!( "Received pushed Nakamoto block {} from {}", block_id, neighbor_key ); @@ -2092,6 +2106,7 @@ impl Relayer { /// Returns the list of Nakamoto blocks we stored, as well as the list of bad neighbors that /// sent us invalid blocks.
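// Illustrative sketch (not part of this change) of the ancestor walk performed
// by `is_nakamoto_reorg` above: from the higher tip, step parent-by-parent down
// to the old tip's height; it is a reorg unless the walk lands exactly on the
// old tip. Block ids are modeled as u64 here, and `parent_of` is a hypothetical
// stand-in for NakamotoChainState::get_nakamoto_parent_block_id.
fn reorged(
    last_tip: u64,
    last_height: u64,
    tip: u64,
    tip_height: u64,
    parent_of: impl Fn(u64) -> Option<u64>,
) -> bool {
    if last_tip == tip {
        return false; // same tip, nothing changed
    }
    if tip_height <= last_height {
        return true; // sibling at equal height, or the chain shrank
    }
    let mut cursor = tip;
    for _ in last_height..tip_height {
        match parent_of(cursor) {
            Some(parent) => cursor = parent,
            None => return true, // broken ancestry: treat as a reorg
        }
    }
    cursor != last_tip // on-chain extension iff the walk ends at the old tip
}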
pub fn process_new_nakamoto_blocks( + connection_opts: &ConnectionOptions, network_result: &mut NetworkResult, burnchain: &Burnchain, sortdb: &mut SortitionDB, @@ -2128,6 +2143,7 @@ impl Relayer { sortdb, chainstate, coord_comms, + connection_opts.reject_blocks_pushed, ) { Ok(x) => x, Err(e) => { @@ -2311,8 +2327,6 @@ impl Relayer { event_observer, )?; - update_stacks_tip_height(chain_height as i64); - Ok(ret) } @@ -2499,14 +2513,27 @@ impl Relayer { for chunk in sync_result.chunks_to_store.into_iter() { let md = chunk.get_slot_metadata(); if let Err(e) = tx.try_replace_chunk(&sc, &md, &chunk.data) { - warn!( - "Failed to store chunk for StackerDB"; - "stackerdb_contract_id" => &format!("{}", &sync_result.contract_id), - "slot_id" => md.slot_id, - "slot_version" => md.slot_version, - "num_bytes" => chunk.data.len(), - "error" => %e - ); + if matches!(e, Error::StaleChunk { .. }) { + // This is a common and expected message, so log it as a debug and with a sep message + // to distinguish it from other message types. + debug!( + "Dropping stale StackerDB chunk"; + "stackerdb_contract_id" => &format!("{}", &sync_result.contract_id), + "slot_id" => md.slot_id, + "slot_version" => md.slot_version, + "num_bytes" => chunk.data.len(), + "error" => %e + ); + } else { + warn!( + "Failed to store chunk for StackerDB"; + "stackerdb_contract_id" => &format!("{}", &sync_result.contract_id), + "slot_id" => md.slot_id, + "slot_version" => md.slot_version, + "num_bytes" => chunk.data.len(), + "error" => %e + ); + } continue; } else { debug!("Stored chunk"; "stackerdb_contract_id" => &format!("{}", &sync_result.contract_id), "slot_id" => md.slot_id, "slot_version" => md.slot_version); @@ -2835,6 +2862,7 @@ impl Relayer { coord_comms: Option<&CoordinatorChannels>, ) -> u64 { let (accepted_blocks, bad_neighbors) = match Self::process_new_nakamoto_blocks( + &self.connection_opts, network_result, burnchain, sortdb, @@ -3009,6 +3037,10 @@ impl Relayer { event_observer.map(|obs| obs.as_stackerdb_event_dispatcher()), )?; + update_stacks_tip_height( + i64::try_from(network_result.stacks_tip_height).unwrap_or(i64::MAX), + ); + let receipts = ProcessedNetReceipts { mempool_txs_added, processed_unconfirmed_state, diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index 3c1fec15c8..3a44de7953 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -115,8 +115,6 @@ pub struct ConversationHttp { pending_request: Option, /// outstanding response pending_response: Option, - /// whether or not there's an error response pending - pending_error_response: bool, /// how much data to buffer (i.e. 
the socket's send buffer size) socket_send_buffer_size: u32, } @@ -166,7 +164,6 @@ impl ConversationHttp { canonical_stacks_tip_height: None, pending_request: None, pending_response: None, - pending_error_response: false, keep_alive: true, total_request_count: 0, total_reply_count: 0, @@ -228,15 +225,6 @@ impl ConversationHttp { ); return Err(net_error::InProgress); } - if self.pending_error_response { - test_debug!( - "{:?},id={}: Error response is inflight", - &self.peer_host, - self.conn_id - ); - return Err(net_error::InProgress); - } - let handle = self.start_request(req)?; self.pending_request = Some(handle); @@ -255,12 +243,12 @@ impl ConversationHttp { ); return Err(net_error::InProgress); } - if self.pending_error_response { - // error already in-flight - return Ok(()); - } + let (mut preamble, body_contents) = res.try_into_contents()?; + preamble.content_length = body_contents.content_length(); + preamble.keep_alive = false; - let (preamble, body_contents) = res.try_into_contents()?; + // account for the request + self.total_request_count += 1; // make the relay handle. There may not have been a valid request in the first place, so // we'll use a relay handle (not a reply handle) to push out the error. @@ -269,7 +257,6 @@ impl ConversationHttp { // queue up the HTTP headers, and then stream back the body. preamble.consensus_serialize(&mut reply)?; self.reply_streams.push_back((reply, body_contents, false)); - self.pending_error_response = true; Ok(()) } @@ -388,11 +375,12 @@ impl ConversationHttp { if broken || (drained_handle && drained_stream) { // done with this stream test_debug!( - "{:?}: done with stream (broken={}, drained_handle={}, drained_stream={})", + "{:?}: done with stream (broken={}, drained_handle={}, drained_stream={}, do_keep_alive={})", &self, broken, drained_handle, - drained_stream + drained_stream, + do_keep_alive, ); self.total_reply_count += 1; self.reply_streams.pop_front(); @@ -482,6 +470,14 @@ impl ConversationHttp { /// Is the connection idle? pub fn is_idle(&self) -> bool { + test_debug!( + "{:?} is_idle? {},{},{},{}", + self, + self.pending_response.is_none(), + self.connection.inbox_len(), + self.connection.outbox_len(), + self.reply_streams.len() + ); self.pending_response.is_none() && self.connection.inbox_len() == 0 && self.connection.outbox_len() == 0 @@ -491,9 +487,13 @@ impl ConversationHttp { /// Is the conversation out of pending data? /// Don't consider it drained if we haven't received anything yet pub fn is_drained(&self) -> bool { - ((self.total_request_count > 0 && self.total_reply_count > 0) - || self.pending_error_response) - && self.is_idle() + test_debug!( + "{:?} is_drained? {},{}", + self, + self.total_request_count, + self.total_reply_count + ); + self.total_request_count > 0 && self.total_reply_count > 0 && self.is_idle() } /// Should the connection be kept alive even if drained? @@ -523,11 +523,6 @@ impl ConversationHttp { &mut self, node: &mut StacksNodeState, ) -> Result, net_error> { - // if we have an in-flight error, then don't take any more requests. 
- if self.pending_error_response { - return Ok(vec![]); - } - // handle in-bound HTTP request(s) let num_inbound = self.connection.inbox_len(); let mut ret = vec![]; @@ -568,7 +563,6 @@ impl ConversationHttp { } StacksHttpMessage::Error(path, resp) => { // new request, but resulted in an error when parsing it - self.total_request_count += 1; self.last_request_timestamp = get_epoch_time_secs(); let start_time = Instant::now(); self.reply_error(resp)?; diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index 3849b9b058..fdad3b85df 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -288,6 +288,7 @@ impl HttpPeer { /// Deregister a socket/event pair #[cfg_attr(test, mutants::skip)] pub fn deregister_http(&mut self, network_state: &mut NetworkState, event_id: usize) -> () { + test_debug!("Remove HTTP event {}", event_id); self.peers.remove(&event_id); match self.sockets.remove(&event_id) { @@ -456,7 +457,7 @@ impl HttpPeer { "Failed to flush HTTP 400 to socket {:?}: {:?}", &client_sock, &e ); - convo_dead = true; + // convo_dead = true; } } Err(e) => { @@ -559,19 +560,11 @@ impl HttpPeer { let mut to_remove = vec![]; let mut msgs = vec![]; for event_id in &poll_state.ready { - if !self.sockets.contains_key(&event_id) { + let Some(client_sock) = self.sockets.get_mut(&event_id) else { debug!("Rogue socket event {}", event_id); to_remove.push(*event_id); continue; - } - - let client_sock_opt = self.sockets.get_mut(&event_id); - if client_sock_opt.is_none() { - debug!("No such socket event {}", event_id); - to_remove.push(*event_id); - continue; - } - let client_sock = client_sock_opt.unwrap(); + }; match self.peers.get_mut(event_id) { Some(ref mut convo) => { diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 57d1a427dc..bbbec21290 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -155,7 +155,7 @@ pub const STACKERDB_CONFIG_FUNCTION: &str = "stackerdb-get-config"; pub const MINER_SLOT_COUNT: u32 = 2; /// Final result of synchronizing state with a remote set of DB replicas -#[derive(Clone)] +#[derive(Clone, PartialEq, Debug)] pub struct StackerDBSyncResult { /// which contract this is a replica for pub contract_id: QualifiedContractIdentifier, diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index e2bea6fd50..a479dad07a 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -33,18 +33,23 @@ use crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::SortitionHandle; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::test_signers::TestSigners; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use crate::chainstate::nakamoto::{ + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, +}; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::{ - CoinbasePayload, StacksTransaction, TenureChangeCause, TenureChangePayload, TokenTransferMemo, - TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionVersion, + CoinbasePayload, Error as ChainstateError, StacksTransaction, TenureChangeCause, + TenureChangePayload, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, + TransactionPayload, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; use 
crate::net::api::gettenureinfo::RPCGetTenureInfo; use crate::net::download::nakamoto::{TenureStartEnd, WantedTenure, *}; use crate::net::inv::nakamoto::NakamotoTenureInv; -use crate::net::test::{dns_thread_start, TestEventObserver}; -use crate::net::tests::inv::nakamoto::{make_nakamoto_peer_from_invs, peer_get_nakamoto_invs}; +use crate::net::test::{dns_thread_start, to_addr, TestEventObserver}; +use crate::net::tests::inv::nakamoto::{ + make_nakamoto_peer_from_invs, make_nakamoto_peers_from_invs_ext, peer_get_nakamoto_invs, +}; use crate::net::tests::{NakamotoBootPlan, TestPeer}; use crate::net::{Error as NetError, Hash160, NeighborAddress, SortitionDB}; use crate::stacks_common::types::Address; @@ -97,6 +102,45 @@ impl NakamotoDownloadStateMachine { } } +impl<'a> NakamotoStagingBlocksConnRef<'a> { + pub fn load_nakamoto_tenure( + &self, + tip: &StacksBlockId, + ) -> Result<Option<Vec<NakamotoBlock>>, ChainstateError> { + let Some((block, ..)) = self.get_nakamoto_block(tip)? else { + return Ok(None); + }; + if block.is_wellformed_tenure_start_block().map_err(|_| { + ChainstateError::InvalidStacksBlock("Malformed tenure-start block".into()) + })? { + // we're done + return Ok(Some(vec![block])); + } + + // this is an intermediate block + let mut tenure = vec![]; + let mut cursor = block.header.parent_block_id.clone(); + tenure.push(block); + loop { + let Some((block, _)) = self.get_nakamoto_block(&cursor)? else { + return Ok(None); + }; + + let is_tenure_start = block.is_wellformed_tenure_start_block().map_err(|_| { + ChainstateError::InvalidStacksBlock("Malformed tenure-start block".into()) + })?; + cursor = block.header.parent_block_id.clone(); + tenure.push(block); + + if is_tenure_start { + break; + } + } + tenure.reverse(); + Ok(Some(tenure)) + } +} + #[test] fn test_nakamoto_tenure_downloader() { let ch = ConsensusHash([0x11; 20]); @@ -240,8 +284,10 @@ fn test_nakamoto_tenure_downloader() { }; let mut td = NakamotoTenureDownloader::new( + tenure_start_block.header.consensus_hash.clone(), tenure_start_block.header.consensus_hash.clone(), tenure_start_block.header.block_id(), + next_tenure_start_block.header.consensus_hash.clone(), next_tenure_start_block.header.block_id(), naddr.clone(), reward_set.clone(), @@ -361,6 +407,7 @@ fn test_nakamoto_unconfirmed_tenure_downloader() { ); let (mut peer, reward_cycle_invs) = peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); + peer.mine_malleablized_blocks = false; let nakamoto_start = NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); @@ -2161,7 +2208,9 @@ fn test_nakamoto_download_run_2_peers() { "Booting peer's stacks tip is now {:?}", &boot_peer.network.stacks_tip ); - if stacks_tip_ch == canonical_stacks_tip_ch { + if stacks_tip_ch == canonical_stacks_tip_ch + && stacks_tip_bhh == canonical_stacks_tip_bhh + { break; } } @@ -2249,6 +2298,793 @@ fn test_nakamoto_unconfirmed_download_run_2_peers() { let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); + // start running that peer so we can boot off of it + let (term_sx, term_rx) = sync_channel(1); + thread::scope(|s| { + s.spawn(move || { + let (mut last_stacks_tip_ch, mut last_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + loop { + boot_peer + .run_with_ibd(true, Some(&mut boot_dns_client)) + .unwrap(); + + let (stacks_tip_ch, stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + + last_stacks_tip_ch = stacks_tip_ch; +
last_stacks_tip_bhh = stacks_tip_bhh; + + debug!( + "Booting peer's stacks tip is now {:?}", + &boot_peer.network.stacks_tip + ); + if stacks_tip_ch == canonical_stacks_tip_ch + && stacks_tip_bhh == canonical_stacks_tip_bhh + { + break; + } + } + + term_sx.send(()).unwrap(); + }); + + loop { + if term_rx.try_recv().is_ok() { + break; + } + peer.step_with_ibd(false).unwrap(); + } + }); + + boot_dns_thread_handle.join().unwrap(); +} + +/// Test the case where one or more blocks from tenure _T_ get orphaned by a tenure-start block in +/// tenure _T + 1_. The unconfirmed downloader should be able to handle this case. +#[test] +fn test_nakamoto_microfork_download_run_2_peers() { + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + + let observer = TestEventObserver::new(); + let bitvecs = vec![ + // full rc + vec![true, true, true, true, true, true, true, true, true, true], + ]; + + let rc_len = 10u64; + + let (mut peer, _) = make_nakamoto_peers_from_invs_ext( + function_name!(), + &observer, + bitvecs.clone(), + |boot_plan| { + boot_plan + .with_pox_constants(rc_len as u32, 5) + .with_extra_peers(0) + .with_initial_balances(initial_balances) + .with_malleablized_blocks(false) + }, + ); + peer.refresh_burnchain_view(); + + let nakamoto_start = + NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + + // create a microfork + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); + let naka_tip = peer.network.stacks_tip.block_id(); + + let sortdb = peer.sortdb_ref().reopen().unwrap(); + let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); + + let naka_tip_header = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip) + .unwrap() + .unwrap(); + + // load the full tenure for this tip + let mut naka_tip_tenure = chainstate + .nakamoto_blocks_db() + .load_nakamoto_tenure(&naka_tip) + .unwrap() + .unwrap(); + + assert!(naka_tip_tenure.len() > 1); + + // make a microfork -- orphan naka_tip_tenure.last() + naka_tip_tenure.pop(); + + debug!("test: mine off of tenure"); + debug!( + "test: first {}: {:?}", + &naka_tip_tenure.first().as_ref().unwrap().block_id(), + &naka_tip_tenure.first().as_ref().unwrap() + ); + debug!( + "test: last {}: {:?}", + &naka_tip_tenure.last().as_ref().unwrap().block_id(), + &naka_tip_tenure.last().as_ref().unwrap() + ); + + peer.mine_nakamoto_on(naka_tip_tenure); + let (fork_naka_block, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: produced fork {}: {:?}", + &fork_naka_block.block_id(), + &fork_naka_block + ); + + peer.refresh_burnchain_view(); + + peer.mine_nakamoto_on(vec![fork_naka_block.clone()]); + let (fork_naka_block_2, ..)
= peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: confirmed fork with {}: {:?}", + &fork_naka_block_2.block_id(), + &fork_naka_block_2 + ); + + peer.refresh_burnchain_view(); + + // get reward cycle data + let (mut peer, reward_cycle_invs) = + peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); + + // make a neighbor from this peer + let boot_observer = TestEventObserver::new(); + let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]); + let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer)); + + let (canonical_stacks_tip_ch, canonical_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); + + let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); + let nakamoto_tip = peer + .sortdb() + .index_handle(&tip.sortition_id) + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + + assert_eq!(tip.block_height, 53); + + // boot up the boot peer's burnchain + for height in 25..tip.block_height { + let ops = peer + .get_burnchain_block_ops_at_height(height + 1) + .unwrap_or(vec![]); + let sn = { + let ih = peer.sortdb().index_handle(&tip.sortition_id); + let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); + sn + }; + test_debug!( + "boot_peer tip height={} hash={}", + sn.block_height, + &sn.burn_header_hash + ); + test_debug!("ops = {:?}", &ops); + let block_header = TestPeer::make_next_burnchain_block( + &boot_peer.config.burnchain, + sn.block_height, + &sn.burn_header_hash, + ops.len() as u64, + false, + ); + TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + } + + let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); + + // start running that peer so we can boot off of it + let (term_sx, term_rx) = sync_channel(1); + thread::scope(|s| { + s.spawn(move || { + let (mut last_stacks_tip_ch, mut last_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + loop { + boot_peer + .run_with_ibd(true, Some(&mut boot_dns_client)) + .unwrap(); + + let (stacks_tip_ch, stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + + last_stacks_tip_ch = stacks_tip_ch; + last_stacks_tip_bhh = stacks_tip_bhh; + + debug!( + "Booting peer's stacks tip is now {:?}", + &boot_peer.network.stacks_tip + ); + if stacks_tip_ch == canonical_stacks_tip_ch + && stacks_tip_bhh == canonical_stacks_tip_bhh + { + break; + } + } + + term_sx.send(()).unwrap(); + }); + + loop { + if term_rx.try_recv().is_ok() { + break; + } + peer.step_with_ibd(false).unwrap(); + } + }); + + boot_dns_thread_handle.join().unwrap(); +} + +/// Test booting up a node where there is one shadow block in the prepare phase, as well as some +/// blocks that mine atop it.
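// Setup note: shadow blocks are not served over the p2p network, so the boot
// peer in this test must have the shadow block staged directly into its
// chainstate before it can sync past it. The staging pattern (quoted from the
// body of this test, further down) is:
//
//     let mut node = boot_peer.stacks_node.take().unwrap();
//     let tx = node.chainstate.staging_db_tx_begin().unwrap();
//     tx.add_shadow_block(&shadow_block).unwrap();
//     tx.commit().unwrap();
//     boot_peer.stacks_node = Some(node);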
+#[test] +fn test_nakamoto_download_run_2_peers_with_one_shadow_block() { + let observer = TestEventObserver::new(); + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + let bitvecs = vec![vec![true, true, false, false]]; + + let rc_len = 10u64; + let (mut peer, _) = make_nakamoto_peers_from_invs_ext( + function_name!(), + &observer, + bitvecs.clone(), + |boot_plan| { + boot_plan + .with_pox_constants(rc_len as u32, 5) + .with_extra_peers(0) + .with_initial_balances(initial_balances) + .with_malleablized_blocks(false) + }, + ); + peer.refresh_burnchain_view(); + let (mut peer, reward_cycle_invs) = + peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); + + let nakamoto_start = + NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + + // create a shadow block + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); + let naka_tip = peer.network.stacks_tip.block_id(); + + let sortdb = peer.sortdb_ref().reopen().unwrap(); + let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); + + let naka_tip_header = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip) + .unwrap() + .unwrap(); + + let naka_tip_tenure = chainstate + .nakamoto_blocks_db() + .load_nakamoto_tenure(&naka_tip) + .unwrap() + .unwrap(); + + assert!(naka_tip_tenure.len() > 1); + + peer.mine_nakamoto_on(naka_tip_tenure); + let shadow_block = peer.make_shadow_tenure(None); + debug!( + "test: produced shadow block {}: {:?}", + &shadow_block.block_id(), + &shadow_block + ); + + peer.refresh_burnchain_view(); + + peer.mine_nakamoto_on(vec![shadow_block.clone()]); + let (next_block, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: confirmed shadow block with {}: {:?}", + &next_block.block_id(), + &next_block + ); + + peer.refresh_burnchain_view(); + peer.mine_nakamoto_on(vec![next_block.clone()]); + + for _ in 0..9 { + let (next_block, ..) 
= peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + debug!( + "test: confirmed shadow block with {}: {:?}", + &next_block.block_id(), + &next_block + ); + + peer.refresh_burnchain_view(); + peer.mine_nakamoto_on(vec![next_block.clone()]); + } + + let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); + let nakamoto_tip = peer + .sortdb() + .index_handle(&tip.sortition_id) + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + + /* + assert_eq!( + tip.block_height, + 56 + ); + */ + + // make a neighbor from this peer + let boot_observer = TestEventObserver::new(); + let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]); + let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer)); + + let (canonical_stacks_tip_ch, canonical_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); + + // boot up the boot peer's burnchain + for height in 25..tip.block_height { + let ops = peer + .get_burnchain_block_ops_at_height(height + 1) + .unwrap_or(vec![]); + let sn = { + let ih = peer.sortdb().index_handle(&tip.sortition_id); + let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); + sn + }; + test_debug!( + "boot_peer tip height={} hash={}", + sn.block_height, + &sn.burn_header_hash + ); + test_debug!("ops = {:?}", &ops); + let block_header = TestPeer::make_next_burnchain_block( + &boot_peer.config.burnchain, + sn.block_height, + &sn.burn_header_hash, + ops.len() as u64, + false, + ); + TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + } + + { + let mut node = boot_peer.stacks_node.take().unwrap(); + let tx = node.chainstate.staging_db_tx_begin().unwrap(); + tx.add_shadow_block(&shadow_block).unwrap(); + tx.commit().unwrap(); + boot_peer.stacks_node = Some(node); + } + + let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); + + // start running that peer so we can boot off of it + let (term_sx, term_rx) = sync_channel(1); + thread::scope(|s| { + s.spawn(move || { + let (mut last_stacks_tip_ch, mut last_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + loop { + boot_peer + .run_with_ibd(true, Some(&mut boot_dns_client)) + .unwrap(); + + let (stacks_tip_ch, stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + + last_stacks_tip_ch = stacks_tip_ch; + last_stacks_tip_bhh = stacks_tip_bhh; + + debug!( + "Booting peer's stacks tip is now {:?}", + &boot_peer.network.stacks_tip + ); + if stacks_tip_ch == canonical_stacks_tip_ch { + break; + } + } + + term_sx.send(()).unwrap(); + }); + + loop { + if term_rx.try_recv().is_ok() { + break; + } + peer.step_with_ibd(false).unwrap(); + } + }); + + boot_dns_thread_handle.join().unwrap(); +} + +/// Test booting up a node where the whole prepare phase is shadow blocks +#[test] +fn test_nakamoto_download_run_2_peers_shadow_prepare_phase() { + let observer = TestEventObserver::new(); + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + let bitvecs = vec![vec![true, true]]; + + let rc_len = 10u64; + let (mut peer, _) = make_nakamoto_peers_from_invs_ext( + function_name!(), + &observer, + bitvecs.clone(), + |boot_plan| { + boot_plan + .with_pox_constants(rc_len as u32, 5) + 
.with_extra_peers(0) + .with_initial_balances(initial_balances) + .with_malleablized_blocks(false) + }, + ); + peer.refresh_burnchain_view(); + let (mut peer, reward_cycle_invs) = + peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); + + let nakamoto_start = + NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + + // create a shadow block + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); + let naka_tip = peer.network.stacks_tip.block_id(); + + let sortdb = peer.sortdb_ref().reopen().unwrap(); + let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); + + let naka_tip_header = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip) + .unwrap() + .unwrap(); + + let naka_tip_tenure = chainstate + .nakamoto_blocks_db() + .load_nakamoto_tenure(&naka_tip) + .unwrap() + .unwrap(); + + assert!(naka_tip_tenure.len() > 1); + + peer.mine_nakamoto_on(naka_tip_tenure); + + let mut shadow_blocks = vec![]; + for _ in 0..10 { + let shadow_block = peer.make_shadow_tenure(None); + debug!( + "test: produced shadow block {}: {:?}", + &shadow_block.block_id(), + &shadow_block + ); + shadow_blocks.push(shadow_block.clone()); + peer.refresh_burnchain_view(); + + peer.mine_nakamoto_on(vec![shadow_block.clone()]); + } + + match peer.single_block_tenure_fallible(&sender_key, |_| {}, |_| {}, |_| true) { + Ok((next_block, ..)) => { + debug!( + "test: confirmed shadow block with {}: {:?}", + &next_block.block_id(), + &next_block + ); + + peer.refresh_burnchain_view(); + peer.mine_nakamoto_on(vec![next_block.clone()]); + } + Err(ChainstateError::NoSuchBlockError) => { + // tried to mine but our commit was invalid (e.g. because we haven't mined often + // enough) + peer.refresh_burnchain_view(); + } + Err(e) => { + panic!("FATAL: {:?}", &e); + } + }; + + for _ in 0..10 { + let (next_block, ..) = + match peer.single_block_tenure_fallible(&sender_key, |_| {}, |_| {}, |_| true) { + Ok(x) => x, + Err(ChainstateError::NoSuchBlockError) => { + // tried to mine but our commit was invalid (e.g. 
because we haven't mined often + // enough) + peer.refresh_burnchain_view(); + continue; + } + Err(e) => { + panic!("FATAL: {:?}", &e); + } + }; + + debug!( + "test: confirmed shadow block with {}: {:?}", + &next_block.block_id(), + &next_block + ); + + peer.refresh_burnchain_view(); + peer.mine_nakamoto_on(vec![next_block.clone()]); + } + + let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); + let nakamoto_tip = peer + .sortdb() + .index_handle(&tip.sortition_id) + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + + // make a neighbor from this peer + let boot_observer = TestEventObserver::new(); + let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]); + let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer)); + + let (canonical_stacks_tip_ch, canonical_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); + + // boot up the boot peer's burnchain + for height in 25..tip.block_height { + let ops = peer + .get_burnchain_block_ops_at_height(height + 1) + .unwrap_or(vec![]); + let sn = { + let ih = peer.sortdb().index_handle(&tip.sortition_id); + let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); + sn + }; + test_debug!( + "boot_peer tip height={} hash={}", + sn.block_height, + &sn.burn_header_hash + ); + test_debug!("ops = {:?}", &ops); + let block_header = TestPeer::make_next_burnchain_block( + &boot_peer.config.burnchain, + sn.block_height, + &sn.burn_header_hash, + ops.len() as u64, + false, + ); + TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + } + { + let mut node = boot_peer.stacks_node.take().unwrap(); + let tx = node.chainstate.staging_db_tx_begin().unwrap(); + for shadow_block in shadow_blocks.into_iter() { + tx.add_shadow_block(&shadow_block).unwrap(); + } + tx.commit().unwrap(); + boot_peer.stacks_node = Some(node); + } + + let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); + + // start running that peer so we can boot off of it + let (term_sx, term_rx) = sync_channel(1); + thread::scope(|s| { + s.spawn(move || { + let (mut last_stacks_tip_ch, mut last_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + loop { + boot_peer + .run_with_ibd(true, Some(&mut boot_dns_client)) + .unwrap(); + + let (stacks_tip_ch, stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) + .unwrap(); + + last_stacks_tip_ch = stacks_tip_ch; + last_stacks_tip_bhh = stacks_tip_bhh; + + debug!( + "Booting peer's stacks tip is now {:?}", + &boot_peer.network.stacks_tip + ); + if stacks_tip_ch == canonical_stacks_tip_ch { + break; + } + } + + term_sx.send(()).unwrap(); + }); + + loop { + if term_rx.try_recv().is_ok() { + break; + } + peer.step_with_ibd(false).unwrap(); + } + }); + + boot_dns_thread_handle.join().unwrap(); +} + +/// Test booting up a node where multiple reward cycles are shadow blocks +#[test] +fn test_nakamoto_download_run_2_peers_shadow_reward_cycles() { + let observer = TestEventObserver::new(); + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + let bitvecs = vec![vec![true, true]]; + + let rc_len = 10u64; + let (mut peer, _) = make_nakamoto_peers_from_invs_ext( + function_name!(), + &observer, + bitvecs.clone(), + 
|boot_plan| { + boot_plan + .with_pox_constants(rc_len as u32, 5) + .with_extra_peers(0) + .with_initial_balances(initial_balances) + .with_malleablized_blocks(false) + }, + ); + peer.refresh_burnchain_view(); + let (mut peer, reward_cycle_invs) = + peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); + + let nakamoto_start = + NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + + // create a shadow block + let naka_tip_ch = peer.network.stacks_tip.consensus_hash.clone(); + let naka_tip_bh = peer.network.stacks_tip.block_hash.clone(); + let naka_tip = peer.network.stacks_tip.block_id(); + + let sortdb = peer.sortdb_ref().reopen().unwrap(); + let (chainstate, _) = peer.chainstate_ref().reopen().unwrap(); + + let naka_tip_header = NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &naka_tip) + .unwrap() + .unwrap(); + + let naka_tip_tenure = chainstate + .nakamoto_blocks_db() + .load_nakamoto_tenure(&naka_tip) + .unwrap() + .unwrap(); + + assert!(naka_tip_tenure.len() > 1); + + peer.mine_nakamoto_on(naka_tip_tenure); + + let mut shadow_blocks = vec![]; + for _ in 0..30 { + let shadow_block = peer.make_shadow_tenure(None); + debug!( + "test: produced shadow block {}: {:?}", + &shadow_block.block_id(), + &shadow_block + ); + shadow_blocks.push(shadow_block.clone()); + peer.refresh_burnchain_view(); + + peer.mine_nakamoto_on(vec![shadow_block.clone()]); + } + + match peer.single_block_tenure_fallible(&sender_key, |_| {}, |_| {}, |_| true) { + Ok((next_block, ..)) => { + debug!( + "test: confirmed shadow block with {}: {:?}", + &next_block.block_id(), + &next_block + ); + + peer.refresh_burnchain_view(); + peer.mine_nakamoto_on(vec![next_block.clone()]); + } + Err(ChainstateError::NoSuchBlockError) => { + // tried to mine but our commit was invalid (e.g. because we haven't mined often + // enough) + peer.refresh_burnchain_view(); + } + Err(e) => { + panic!("FATAL: {:?}", &e); + } + }; + + for _ in 0..10 { + let (next_block, ..) = + match peer.single_block_tenure_fallible(&sender_key, |_| {}, |_| {}, |_| true) { + Ok(x) => x, + Err(ChainstateError::NoSuchBlockError) => { + // tried to mine but our commit was invalid (e.g. 
because we haven't mined often + // enough) + peer.refresh_burnchain_view(); + continue; + } + Err(e) => { + panic!("FATAL: {:?}", &e); + } + }; + + debug!( + "test: confirmed shadow block with {}: {:?}", + &next_block.block_id(), + &next_block + ); + + peer.refresh_burnchain_view(); + peer.mine_nakamoto_on(vec![next_block.clone()]); + } + + let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); + let nakamoto_tip = peer + .sortdb() + .index_handle(&tip.sortition_id) + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + + assert_eq!(tip.block_height, 84); + + // make a neighbor from this peer + let boot_observer = TestEventObserver::new(); + let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]); + let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer)); + + let (canonical_stacks_tip_ch, canonical_stacks_tip_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); + + // boot up the boot peer's burnchain + for height in 25..tip.block_height { + let ops = peer + .get_burnchain_block_ops_at_height(height + 1) + .unwrap_or(vec![]); + let sn = { + let ih = peer.sortdb().index_handle(&tip.sortition_id); + let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); + sn + }; + test_debug!( + "boot_peer tip height={} hash={}", + sn.block_height, + &sn.burn_header_hash + ); + test_debug!("ops = {:?}", &ops); + let block_header = TestPeer::make_next_burnchain_block( + &boot_peer.config.burnchain, + sn.block_height, + &sn.burn_header_hash, + ops.len() as u64, + false, + ); + TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); + } + { + let mut node = boot_peer.stacks_node.take().unwrap(); + let tx = node.chainstate.staging_db_tx_begin().unwrap(); + for shadow_block in shadow_blocks.into_iter() { + tx.add_shadow_block(&shadow_block).unwrap(); + } + tx.commit().unwrap(); + boot_peer.stacks_node = Some(node); + } + + let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); + // start running that peer so we can boot off of it let (term_sx, term_rx) = sync_channel(1); thread::scope(|s| { diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index fac9623d3f..5f889cde3e 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -404,15 +404,12 @@ pub fn make_nakamoto_peers_from_invs<'a>( bitvecs: Vec<Vec<bool>>, num_peers: usize, ) -> (TestPeer<'a>, Vec<TestPeer<'a>>) { - inner_make_nakamoto_peers_from_invs( - test_name, - observer, - rc_len, - prepare_len, - bitvecs, - num_peers, - vec![], - ) + make_nakamoto_peers_from_invs_ext(test_name, observer, bitvecs, |boot_plan| { + boot_plan + .with_pox_constants(rc_len, prepare_len) + .with_extra_peers(num_peers) + .with_initial_balances(vec![]) + }) } /// NOTE: The second return value does _not_ need `<'a>`, since `observer` is never installed into @@ -426,31 +423,26 @@ pub fn make_nakamoto_peers_from_invs_and_balances<'a>( num_peers: usize, initial_balances: Vec<(PrincipalData, u64)>, ) -> (TestPeer<'a>, Vec<TestPeer<'a>>) { - inner_make_nakamoto_peers_from_invs( - test_name, - observer, - rc_len, - prepare_len, - bitvecs, - num_peers, - initial_balances, - ) + make_nakamoto_peers_from_invs_ext(test_name, observer, bitvecs, |boot_plan| { + boot_plan + .with_pox_constants(rc_len, prepare_len) + .with_extra_peers(num_peers) + .with_initial_balances(initial_balances) + }) } /// Make peers from
inventories and balances -fn inner_make_nakamoto_peers_from_invs<'a>( +/// NOTE: The second return value does _not_ need `<'a>`, since `observer` is never installed into +/// the peers here. However, it appears unavoidable to the borrow-checker. +pub fn make_nakamoto_peers_from_invs_ext<'a, F>( test_name: &str, observer: &'a TestEventObserver, - rc_len: u32, - prepare_len: u32, bitvecs: Vec<Vec<bool>>, - num_peers: usize, - mut initial_balances: Vec<(PrincipalData, u64)>, -) -> (TestPeer<'a>, Vec<TestPeer<'a>>) { - for bitvec in bitvecs.iter() { - assert_eq!(bitvec.len() as u32, rc_len); - } - + boot_config: F, +) -> (TestPeer<'a>, Vec<TestPeer<'a>>) +where + F: FnOnce(NakamotoBootPlan) -> NakamotoBootPlan, +{ let private_key = StacksPrivateKey::from_seed(&[2]); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, @@ -461,6 +453,7 @@ fn inner_make_nakamoto_peers_from_invs<'a>( .unwrap(); let recipient_addr = StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + let mut initial_balances = vec![(addr.to_account_principal(), 1_000_000)]; let mut sender_nonce = 0; @@ -525,14 +518,13 @@ fn inner_make_nakamoto_peers_from_invs<'a>( 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, ]); - initial_balances.push((addr.into(), 1_000_000)); - let plan = NakamotoBootPlan::new(test_name) - .with_private_key(private_key) - .with_pox_constants(rc_len, prepare_len) - .with_initial_balances(initial_balances) - .with_extra_peers(num_peers) - .with_test_signers(test_signers) - .with_test_stackers(test_stackers); + let mut plan = boot_config( + NakamotoBootPlan::new(test_name) + .with_private_key(private_key) + .with_test_signers(test_signers) + .with_test_stackers(test_stackers), + ); + plan.initial_balances.append(&mut initial_balances); let (peer, other_peers) = plan.boot_into_nakamoto_peers(boot_tenures, Some(observer)); (peer, other_peers) @@ -2382,3 +2374,87 @@ fn test_nakamoto_make_tenure_inv_from_old_tips() { assert_eq!(bits, expected_bits[0..bit_len]); } } + +#[test] +fn test_nakamoto_invs_shadow_blocks() { + let observer = TestEventObserver::new(); + let sender_key = StacksPrivateKey::new(); + let sender_addr = to_addr(&sender_key); + let initial_balances = vec![(sender_addr.to_account_principal(), 1000000000)]; + let mut bitvecs = vec![vec![ + true, true, true, true, true, true, true, true, true, true, + ]]; + + let (mut peer, _) = make_nakamoto_peers_from_invs_and_balances( + function_name!(), + &observer, + 10, + 3, + bitvecs.clone(), + 0, + initial_balances, + ); + let nakamoto_start = + NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); + + let mut expected_ids = vec![]; + + // construct and add shadow blocks to this peer's chainstate + peer.refresh_burnchain_view(); + let shadow_block = peer.make_shadow_tenure(None); + expected_ids.push(shadow_block.block_id()); + peer.mine_nakamoto_on(vec![shadow_block]); + + peer.refresh_burnchain_view(); + let (naka_block, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + expected_ids.push(naka_block.block_id()); + peer.mine_nakamoto_on(vec![naka_block]); + + peer.refresh_burnchain_view(); + let shadow_block = peer.make_shadow_tenure(None); + expected_ids.push(shadow_block.block_id()); + peer.mine_nakamoto_on(vec![shadow_block]); + + peer.refresh_burnchain_view(); + let (naka_block, ..)
= peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + expected_ids.push(naka_block.block_id()); + peer.mine_nakamoto_on(vec![naka_block]); + + peer.refresh_burnchain_view(); + let shadow_block = peer.make_shadow_tenure(None); + expected_ids.push(shadow_block.block_id()); + peer.mine_nakamoto_on(vec![shadow_block]); + + peer.refresh_burnchain_view(); + let (naka_block, ..) = peer.single_block_tenure(&sender_key, |_| {}, |_| {}, |_| true); + expected_ids.push(naka_block.block_id()); + peer.mine_nakamoto_on(vec![naka_block]); + + let (mut peer, reward_cycle_invs) = + peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); + + // the inv should show `true` for each shadow tenure + bitvecs.push(vec![true, true, true, true, true, true]); + check_inv_messages(bitvecs, 10, nakamoto_start, reward_cycle_invs); + + // shadow blocks are part of the history + peer.refresh_burnchain_view(); + let tip = peer.network.stacks_tip.block_id(); + + let mut stored_block_ids = vec![]; + let mut cursor = tip; + for _ in 0..expected_ids.len() { + let block = peer + .chainstate() + .nakamoto_blocks_db() + .get_nakamoto_block(&cursor) + .unwrap() + .unwrap() + .0; + stored_block_ids.push(block.block_id()); + cursor = block.header.parent_block_id; + } + + stored_block_ids.reverse(); + assert_eq!(stored_block_ids, expected_ids); +} diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index 7a44a56788..d3f30aca19 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -973,63 +973,68 @@ pub fn test_mempool_storage_nakamoto() { StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); let mempool_txs = RefCell::new(vec![]); - let blocks_and_sizes = peer.make_nakamoto_tenure_and( - tenure_change_tx, - coinbase_tx, - &mut test_signers, - |_| {}, - |miner, chainstate, sortdb, blocks_so_far| { - let mut txs = vec![]; - if blocks_so_far.len() < num_blocks { - let account = get_account(chainstate, sortdb, &addr); - - let stx_transfer = make_token_transfer( - chainstate, - sortdb, - &private_key, - account.nonce, - 200, - 200, - &recipient_addr, - ); - txs.push(stx_transfer.clone()); - (*mempool_txs.borrow_mut()).push(stx_transfer.clone()); - all_txs.push(stx_transfer.clone()); - } - txs - }, - |_| { - let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + let blocks_and_sizes = peer + .make_nakamoto_tenure_and( + tenure_change_tx, + coinbase_tx, + &mut test_signers, + |_| {}, + |miner, chainstate, sortdb, blocks_so_far| { + let mut txs = vec![]; + if blocks_so_far.len() < num_blocks { + let account = get_account(chainstate, sortdb, &addr); + + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 200, + 200, + &recipient_addr, + ); + txs.push(stx_transfer.clone()); + (*mempool_txs.borrow_mut()).push(stx_transfer.clone()); + all_txs.push(stx_transfer.clone()); + } + txs + }, + |_| { + let tip = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let sort_tip = SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &tip.consensus_hash, + ) .unwrap() .unwrap(); - let sort_tip = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &tip.consensus_hash) + let epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), sort_tip.block_height) .unwrap() .unwrap(); - let epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), sort_tip.block_height) - .unwrap() - 
.unwrap(); - - // submit each transaction to the mempool - for mempool_tx in (*mempool_txs.borrow()).as_slice() { - mempool - .submit( - &mut chainstate, - &sortdb, - &tip.consensus_hash, - &tip.anchored_header.block_hash(), - &mempool_tx, - None, - &epoch.block_limit, - &epoch.epoch_id, - ) - .unwrap(); - } - (*mempool_txs.borrow_mut()).clear(); - true - }, - ); + // submit each transaction to the mempool + for mempool_tx in (*mempool_txs.borrow()).as_slice() { + mempool + .submit( + &mut chainstate, + &sortdb, + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + &mempool_tx, + None, + &epoch.block_limit, + &epoch.epoch_id, + ) + .unwrap(); + } + + (*mempool_txs.borrow_mut()).clear(); + true + }, + ) + .unwrap(); total_blocks += num_blocks; } diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 6e61e7e610..6729dbc4a8 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -22,18 +22,24 @@ pub mod mempool; pub mod neighbors; pub mod relay; -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use clarity::vm::clarity::ClarityConnection; -use clarity::vm::types::PrincipalData; +use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; +use libstackerdb::StackerDBChunkData; use rand::prelude::SliceRandom; use rand::{thread_rng, Rng, RngCore}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; +use stacks_common::bitvec::BitVec; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use stacks_common::types::chainstate::{ - StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey, + BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, + StacksPublicKey, TrieHash, }; +use stacks_common::types::net::PeerAddress; use stacks_common::types::{Address, StacksEpochId}; +use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::VRFProof; use crate::burnchains::PoxConstants; @@ -45,7 +51,7 @@ use crate::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::tests::node::TestStacker; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::test::{ key_to_stacks_addr, make_pox_4_lockup, make_pox_4_lockup_chain_id, make_signer_key_signature, @@ -54,8 +60,10 @@ use crate::chainstate::stacks::boot::test::{ use crate::chainstate::stacks::boot::{ MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; +use crate::chainstate::stacks::db::blocks::test::make_empty_coinbase_block; use crate::chainstate::stacks::db::{MinerPaymentTxFees, StacksAccount, StacksChainState}; use crate::chainstate::stacks::events::TransactionOrigin; +use crate::chainstate::stacks::test::make_codec_test_microblock; use crate::chainstate::stacks::{ CoinbasePayload, StacksTransaction, StacksTransactionSigner, TenureChangeCause, TenureChangePayload, TokenTransferMemo, TransactionAnchorMode, TransactionAuth, @@ -66,6 +74,10 @@ use crate::core::{StacksEpoch, StacksEpochExtension}; use crate::net::relay::{BlockAcceptResponse, Relayer}; use crate::net::stackerdb::StackerDBConfig; use 
crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; +use crate::net::{ + BlocksData, BlocksDatum, MicroblocksData, NakamotoBlocksData, NeighborKey, NetworkResult, + PingData, StackerDBPushChunkData, StacksMessage, StacksMessageType, +}; use crate::util_lib::boot::boot_code_id; use crate::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_signature; @@ -93,6 +105,8 @@ pub struct NakamotoBootPlan { pub num_peers: usize, /// Whether to add an initial balance for `private_key`'s account pub add_default_balance: bool, + /// Whether or not to produce malleablized blocks + pub malleablized_blocks: bool, pub network_id: u32, } @@ -109,6 +123,7 @@ impl NakamotoBootPlan { observer: Some(TestEventObserver::new()), num_peers: 0, add_default_balance: true, + malleablized_blocks: true, network_id: TestPeerConfig::default().network_id, } } @@ -165,6 +180,11 @@ impl NakamotoBootPlan { self } + pub fn with_malleablized_blocks(mut self, malleablized_blocks: bool) -> Self { + self.malleablized_blocks = malleablized_blocks; + self + } + /// This is the first tenure in which nakamoto blocks will be built. /// However, it is also the last sortition for an epoch 2.x block. pub fn nakamoto_start_burn_height(pox_consts: &PoxConstants) -> u64 { @@ -394,6 +414,8 @@ impl NakamotoBootPlan { peer_config.burnchain.pox_constants = self.pox_constants.clone(); let mut peer = TestPeer::new_with_observer(peer_config.clone(), observer); + peer.mine_malleablized_blocks = self.malleablized_blocks; + let mut other_peers = vec![]; for i in 0..self.num_peers { let mut other_config = peer_config.clone(); @@ -404,7 +426,11 @@ impl NakamotoBootPlan { other_config.private_key = StacksPrivateKey::from_seed(&(i as u128).to_be_bytes()); other_config.add_neighbor(&peer.to_neighbor()); - other_peers.push(TestPeer::new_with_observer(other_config, None)); + + let mut other_peer = TestPeer::new_with_observer(other_config, None); + other_peer.mine_malleablized_blocks = self.malleablized_blocks; + + other_peers.push(other_peer); } self.advance_to_nakamoto(&mut peer, &mut other_peers); @@ -525,7 +551,7 @@ impl NakamotoBootPlan { }) .collect(); - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = peer.network.stacks_tip.clone(); let mut stacks_block = peer.tenure_with_txs(&stack_txs, &mut peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -533,13 +559,14 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, peer.network.parent_stacks_tip); } for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) { - let old_tip = other_peer.network.stacks_tip.clone(); + let mut old_tip = other_peer.network.stacks_tip.clone(); other_peer.tenure_with_txs(&stack_txs, other_peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -548,6 +575,7 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(other_peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = other_peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, other_peer.network.parent_stacks_tip); } } @@ -560,7 +588,7 @@ impl NakamotoBootPlan { .burnchain .is_in_prepare_phase(sortition_height.into()) { - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = 
peer.network.stacks_tip.clone(); stacks_block = peer.tenure_with_txs(&[], &mut peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -568,13 +596,14 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, peer.network.parent_stacks_tip); } other_peers .iter_mut() .zip(other_peer_nonces.iter_mut()) .for_each(|(peer, nonce)| { - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = peer.network.stacks_tip.clone(); peer.tenure_with_txs(&[], nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -583,6 +612,7 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, peer.network.parent_stacks_tip); } }); @@ -595,7 +625,7 @@ impl NakamotoBootPlan { // advance to the start of epoch 3.0 while sortition_height < epoch_30_height - 1 { - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = peer.network.stacks_tip.clone(); peer.tenure_with_txs(&vec![], &mut peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -603,13 +633,14 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, peer.network.parent_stacks_tip); } for (other_peer, other_peer_nonce) in other_peers.iter_mut().zip(other_peer_nonces.iter_mut()) { - let old_tip = peer.network.stacks_tip.clone(); + let mut old_tip = peer.network.stacks_tip.clone(); other_peer.tenure_with_txs(&vec![], other_peer_nonce); let (stacks_tip_ch, stacks_tip_bh) = @@ -618,6 +649,8 @@ impl NakamotoBootPlan { let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); assert_eq!(other_peer.network.stacks_tip.block_id(), stacks_tip); if old_tip.block_id() != stacks_tip { + old_tip.burnchain_height = + other_peer.network.parent_stacks_tip.burnchain_height; assert_eq!(old_tip, other_peer.network.parent_stacks_tip); } } @@ -1125,3 +1158,676 @@ fn test_boot_nakamoto_peer() { let observer = TestEventObserver::new(); let (peer, other_peers) = plan.boot_into_nakamoto_peers(boot_tenures, Some(&observer)); } + +#[test] +fn test_network_result_update() { + let mut network_result_1 = NetworkResult::new( + StacksBlockId([0x11; 32]), + 1, + 1, + 1, + 1, + 1, + 1, + 1, + ConsensusHash([0x11; 20]), + HashMap::new(), + ); + + let mut network_result_2 = NetworkResult::new( + StacksBlockId([0x22; 32]), + 2, + 2, + 2, + 2, + 2, + 2, + 2, + ConsensusHash([0x22; 20]), + HashMap::new(), + ); + + let nk1 = NeighborKey { + peer_version: 1, + network_id: 1, + addrbytes: PeerAddress([0x11; 16]), + port: 1, + }; + + let nk2 = NeighborKey { + peer_version: 2, + network_id: 2, + addrbytes: PeerAddress([0x22; 16]), + port: 2, + }; + + let msg1 = StacksMessage::new( + 1, + 1, + 1, + &BurnchainHeaderHash([0x11; 32]), + 1, + &BurnchainHeaderHash([0x11; 32]), + StacksMessageType::Ping(PingData { nonce: 1 }), + ); + + let mut msg2 = StacksMessage::new( + 2, + 2, + 2, + &BurnchainHeaderHash([0x22; 32]), + 2, + &BurnchainHeaderHash([0x22; 32]), + StacksMessageType::Ping(PingData { 
nonce: 2 }), + ); + msg2.sign(2, &StacksPrivateKey::new()).unwrap(); + + let pkey_1 = StacksPrivateKey::new(); + let pkey_2 = StacksPrivateKey::new(); + + let pushed_pkey_1 = StacksPrivateKey::new(); + let pushed_pkey_2 = StacksPrivateKey::new(); + + let uploaded_pkey_1 = StacksPrivateKey::new(); + let uploaded_pkey_2 = StacksPrivateKey::new(); + + let blk1 = make_empty_coinbase_block(&pkey_1); + let blk2 = make_empty_coinbase_block(&pkey_2); + + let pushed_blk1 = make_empty_coinbase_block(&pushed_pkey_1); + let pushed_blk2 = make_empty_coinbase_block(&pushed_pkey_2); + + let uploaded_blk1 = make_empty_coinbase_block(&uploaded_pkey_1); + let uploaded_blk2 = make_empty_coinbase_block(&uploaded_pkey_2); + + let mblk1 = make_codec_test_microblock(1); + let mblk2 = make_codec_test_microblock(2); + + let pushed_mblk1 = make_codec_test_microblock(3); + let pushed_mblk2 = make_codec_test_microblock(4); + + let uploaded_mblk1 = make_codec_test_microblock(5); + let uploaded_mblk2 = make_codec_test_microblock(6); + + let pushed_tx1 = make_codec_test_microblock(3).txs[2].clone(); + let pushed_tx2 = make_codec_test_microblock(4).txs[3].clone(); + + let uploaded_tx1 = make_codec_test_microblock(5).txs[4].clone(); + let uploaded_tx2 = make_codec_test_microblock(6).txs[5].clone(); + + let synced_tx1 = make_codec_test_microblock(7).txs[6].clone(); + let synced_tx2 = make_codec_test_microblock(8).txs[7].clone(); + + let naka_header_1 = NakamotoBlockHeader { + version: 1, + chain_length: 1, + burn_spent: 1, + consensus_hash: ConsensusHash([0x01; 20]), + parent_block_id: StacksBlockId([0x01; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x01; 32]), + state_index_root: TrieHash([0x01; 32]), + timestamp: 1, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let naka_header_2 = NakamotoBlockHeader { + version: 2, + chain_length: 2, + burn_spent: 2, + consensus_hash: ConsensusHash([0x02; 20]), + parent_block_id: StacksBlockId([0x02; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x02; 32]), + state_index_root: TrieHash([0x02; 32]), + timestamp: 2, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let naka_pushed_header_1 = NakamotoBlockHeader { + version: 3, + chain_length: 3, + burn_spent: 3, + consensus_hash: ConsensusHash([0x03; 20]), + parent_block_id: StacksBlockId([0x03; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x03; 32]), + state_index_root: TrieHash([0x03; 32]), + timestamp: 3, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let naka_pushed_header_2 = NakamotoBlockHeader { + version: 4, + chain_length: 4, + burn_spent: 4, + consensus_hash: ConsensusHash([0x04; 20]), + parent_block_id: StacksBlockId([0x04; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x04; 32]), + state_index_root: TrieHash([0x04; 32]), + timestamp: 4, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let naka_uploaded_header_1 = NakamotoBlockHeader { + version: 5, + chain_length: 5, + burn_spent: 5, + consensus_hash: ConsensusHash([0x05; 20]), + parent_block_id: StacksBlockId([0x05; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x05; 32]), + state_index_root: TrieHash([0x05; 32]), + timestamp: 5, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let 
naka_uploaded_header_2 = NakamotoBlockHeader { + version: 6, + chain_length: 6, + burn_spent: 6, + consensus_hash: ConsensusHash([0x06; 20]), + parent_block_id: StacksBlockId([0x06; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), + state_index_root: TrieHash([0x06; 32]), + timestamp: 6, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::zeros(1).unwrap(), + }; + + let nblk1 = NakamotoBlock { + header: naka_header_1.clone(), + txs: vec![], + }; + let nblk2 = NakamotoBlock { + header: naka_header_2.clone(), + txs: vec![], + }; + + let pushed_nblk1 = NakamotoBlock { + header: naka_pushed_header_1.clone(), + txs: vec![], + }; + let pushed_nblk2 = NakamotoBlock { + header: naka_pushed_header_2.clone(), + txs: vec![], + }; + + let uploaded_nblk1 = NakamotoBlock { + header: naka_uploaded_header_1.clone(), + txs: vec![], + }; + let uploaded_nblk2 = NakamotoBlock { + header: naka_uploaded_header_2.clone(), + txs: vec![], + }; + + let pushed_stackerdb_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0x11; 20]), + chunk_data: StackerDBChunkData { + slot_id: 1, + slot_version: 1, + sig: MessageSignature::empty(), + data: vec![1], + }, + }; + + let pushed_stackerdb_chunk_2 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0x22; 20]), + chunk_data: StackerDBChunkData { + slot_id: 2, + slot_version: 2, + sig: MessageSignature::empty(), + data: vec![2], + }, + }; + + let uploaded_stackerdb_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0x33; 20]), + chunk_data: StackerDBChunkData { + slot_id: 3, + slot_version: 3, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + let uploaded_stackerdb_chunk_2 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0x44; 20]), + chunk_data: StackerDBChunkData { + slot_id: 4, + slot_version: 4, + sig: MessageSignature::empty(), + data: vec![4], + }, + }; + + network_result_1 + .unhandled_messages + .insert(nk1.clone(), vec![msg1.clone()]); + network_result_1 + .blocks + .push((ConsensusHash([0x11; 20]), blk1.clone(), 1)); + network_result_1.confirmed_microblocks.push(( + ConsensusHash([0x11; 20]), + vec![mblk1.clone()], + 1, + )); + network_result_1 + .nakamoto_blocks + .insert(nblk1.block_id(), nblk1.clone()); + network_result_1 + .pushed_transactions + .insert(nk1.clone(), vec![(vec![], pushed_tx1.clone())]); + network_result_1.pushed_blocks.insert( + nk1.clone(), + vec![BlocksData { + blocks: vec![BlocksDatum(ConsensusHash([0x11; 20]), pushed_blk1.clone())], + }], + ); + network_result_1.pushed_microblocks.insert( + nk1.clone(), + vec![( + vec![], + MicroblocksData { + index_anchor_block: StacksBlockId([0x11; 32]), + microblocks: vec![pushed_mblk1.clone()], + }, + )], + ); + network_result_1.pushed_nakamoto_blocks.insert( + nk1.clone(), + vec![( + vec![], + NakamotoBlocksData { + blocks: vec![pushed_nblk1], + }, + )], + ); + network_result_1 + .uploaded_transactions + .push(uploaded_tx1.clone()); + network_result_1.uploaded_blocks.push(BlocksData { + blocks: vec![BlocksDatum( + ConsensusHash([0x11; 20]), + uploaded_blk1.clone(), + )], + }); + network_result_1.uploaded_microblocks.push(MicroblocksData { + index_anchor_block: StacksBlockId([0x11; 32]), + microblocks: vec![uploaded_mblk1.clone()], + }); + 
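+    // the pushes below finish populating network_result_1; network_result_2 is then filled
+    // with a fully disjoint data set keyed by nk2, so that update() can be checked against
+    // the hand-built union further down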
network_result_1 + .uploaded_nakamoto_blocks + .push(uploaded_nblk1.clone()); + network_result_1 + .pushed_stackerdb_chunks + .push(pushed_stackerdb_chunk_1.clone()); + network_result_1 + .uploaded_stackerdb_chunks + .push(uploaded_stackerdb_chunk_1.clone()); + network_result_1.synced_transactions.push(synced_tx1); + + network_result_2 + .unhandled_messages + .insert(nk2.clone(), vec![msg2.clone()]); + network_result_2 + .blocks + .push((ConsensusHash([0x22; 20]), blk2.clone(), 2)); + network_result_2.confirmed_microblocks.push(( + ConsensusHash([0x22; 20]), + vec![mblk2.clone()], + 2, + )); + network_result_2 + .nakamoto_blocks + .insert(nblk2.block_id(), nblk2.clone()); + network_result_2 + .pushed_transactions + .insert(nk2.clone(), vec![(vec![], pushed_tx2.clone())]); + network_result_2.pushed_blocks.insert( + nk2.clone(), + vec![BlocksData { + blocks: vec![BlocksDatum(ConsensusHash([0x22; 20]), pushed_blk2.clone())], + }], + ); + network_result_2.pushed_microblocks.insert( + nk2.clone(), + vec![( + vec![], + MicroblocksData { + index_anchor_block: StacksBlockId([0x22; 32]), + microblocks: vec![pushed_mblk2.clone()], + }, + )], + ); + network_result_2.pushed_nakamoto_blocks.insert( + nk2.clone(), + vec![( + vec![], + NakamotoBlocksData { + blocks: vec![pushed_nblk2], + }, + )], + ); + network_result_2 + .uploaded_transactions + .push(uploaded_tx2.clone()); + network_result_2.uploaded_blocks.push(BlocksData { + blocks: vec![BlocksDatum( + ConsensusHash([0x22; 20]), + uploaded_blk2.clone(), + )], + }); + network_result_2.uploaded_microblocks.push(MicroblocksData { + index_anchor_block: StacksBlockId([0x22; 32]), + microblocks: vec![uploaded_mblk2.clone()], + }); + network_result_2 + .uploaded_nakamoto_blocks + .push(uploaded_nblk2.clone()); + network_result_2 + .pushed_stackerdb_chunks + .push(pushed_stackerdb_chunk_2.clone()); + network_result_2 + .uploaded_stackerdb_chunks + .push(uploaded_stackerdb_chunk_2.clone()); + network_result_2.synced_transactions.push(synced_tx2); + + let mut network_result_union = network_result_2.clone(); + let mut n1 = network_result_1.clone(); + network_result_union + .unhandled_messages + .extend(n1.unhandled_messages.into_iter()); + network_result_union.blocks.append(&mut n1.blocks); + network_result_union + .confirmed_microblocks + .append(&mut n1.confirmed_microblocks); + network_result_union + .nakamoto_blocks + .extend(n1.nakamoto_blocks.into_iter()); + network_result_union + .pushed_transactions + .extend(n1.pushed_transactions.into_iter()); + network_result_union + .pushed_blocks + .extend(n1.pushed_blocks.into_iter()); + network_result_union + .pushed_microblocks + .extend(n1.pushed_microblocks.into_iter()); + network_result_union + .pushed_nakamoto_blocks + .extend(n1.pushed_nakamoto_blocks.into_iter()); + network_result_union + .uploaded_transactions + .append(&mut n1.uploaded_transactions); + network_result_union + .uploaded_blocks + .append(&mut n1.uploaded_blocks); + network_result_union + .uploaded_microblocks + .append(&mut n1.uploaded_microblocks); + network_result_union + .uploaded_nakamoto_blocks + .append(&mut n1.uploaded_nakamoto_blocks); + // stackerdb chunks from n1 get dropped since their rc_consensus_hash no longer matches + network_result_union + .synced_transactions + .append(&mut n1.synced_transactions); + + // update is idempotent + let old = network_result_1.clone(); + let new = network_result_1.clone(); + assert_eq!(old.update(new), network_result_1); + + // disjoint results get unioned, except for stackerdb chunks + let old 
= network_result_1.clone(); + let new = network_result_2.clone(); + assert_eq!(old.update(new), network_result_union); + + // merging a subset is idempotent + assert_eq!( + network_result_1 + .clone() + .update(network_result_union.clone()), + network_result_union + ); + assert_eq!( + network_result_2 + .clone() + .update(network_result_union.clone()), + network_result_union + ); + + // stackerdb uploaded chunks get consolidated correctly + let mut old = NetworkResult::new( + StacksBlockId([0xaa; 32]), + 10, + 10, + 10, + 10, + 10, + 10, + 10, + ConsensusHash([0xaa; 20]), + HashMap::new(), + ); + let mut new = old.clone(); + + let old_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0xaa; 20]), + chunk_data: StackerDBChunkData { + slot_id: 1, + slot_version: 1, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + let new_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0xaa; 20]), + chunk_data: StackerDBChunkData { + slot_id: 1, + slot_version: 2, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + let new_chunk_2 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0xaa; 20]), + chunk_data: StackerDBChunkData { + slot_id: 2, + slot_version: 2, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + old.uploaded_stackerdb_chunks.push(old_chunk_1.clone()); + // replaced + new.uploaded_stackerdb_chunks.push(new_chunk_1.clone()); + // included + new.uploaded_stackerdb_chunks.push(new_chunk_2.clone()); + + assert_eq!( + old.update(new).uploaded_stackerdb_chunks, + vec![new_chunk_1.clone(), new_chunk_2.clone()] + ); + + // stackerdb pushed chunks get consolidated correctly + let mut old = NetworkResult::new( + StacksBlockId([0xaa; 32]), + 10, + 10, + 10, + 10, + 10, + 10, + 10, + ConsensusHash([0xaa; 20]), + HashMap::new(), + ); + let mut new = old.clone(); + + let old_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0xaa; 20]), + chunk_data: StackerDBChunkData { + slot_id: 1, + slot_version: 1, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + let new_chunk_1 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0xaa; 20]), + chunk_data: StackerDBChunkData { + slot_id: 1, + slot_version: 2, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + let new_chunk_2 = StackerDBPushChunkData { + contract_id: QualifiedContractIdentifier::transient(), + rc_consensus_hash: ConsensusHash([0xaa; 20]), + chunk_data: StackerDBChunkData { + slot_id: 2, + slot_version: 2, + sig: MessageSignature::empty(), + data: vec![3], + }, + }; + + old.pushed_stackerdb_chunks.push(old_chunk_1.clone()); + // replaced + new.pushed_stackerdb_chunks.push(new_chunk_1.clone()); + // included + new.pushed_stackerdb_chunks.push(new_chunk_2.clone()); + + assert_eq!( + old.update(new).pushed_stackerdb_chunks, + vec![new_chunk_1.clone(), new_chunk_2.clone()] + ); + + // nakamoto blocks obtained via download, upload, or push get consolidated + let mut old = NetworkResult::new( + StacksBlockId([0xbb; 32]), + 11, + 11, + 11, + 11, + 11, + 11, + 11, + ConsensusHash([0xbb; 20]), + HashMap::new(), + ); + old.nakamoto_blocks.insert(nblk1.block_id(), nblk1.clone()); + old.pushed_nakamoto_blocks.insert( + nk1.clone(), +
vec![( + vec![], + NakamotoBlocksData { + blocks: vec![nblk1.clone()], + }, + )], + ); + old.uploaded_nakamoto_blocks.push(nblk1.clone()); + + let new = NetworkResult::new( + StacksBlockId([0xbb; 32]), + 11, + 11, + 11, + 11, + 11, + 11, + 11, + ConsensusHash([0xbb; 20]), + HashMap::new(), + ); + + let mut new_pushed = new.clone(); + let mut new_uploaded = new.clone(); + let mut new_downloaded = new.clone(); + + new_downloaded + .nakamoto_blocks + .insert(nblk1.block_id(), nblk1.clone()); + new_pushed.pushed_nakamoto_blocks.insert( + nk2.clone(), + vec![( + vec![], + NakamotoBlocksData { + blocks: vec![nblk1.clone()], + }, + )], + ); + new_uploaded.uploaded_nakamoto_blocks.push(nblk1.clone()); + + debug!("===="); + let updated_downloaded = old.clone().update(new_downloaded); + assert_eq!(updated_downloaded.nakamoto_blocks.len(), 1); + assert_eq!( + updated_downloaded + .nakamoto_blocks + .get(&nblk1.block_id()) + .unwrap(), + &nblk1 + ); + assert_eq!(updated_downloaded.pushed_nakamoto_blocks.len(), 0); + assert_eq!(updated_downloaded.uploaded_nakamoto_blocks.len(), 0); + + debug!("===="); + let updated_pushed = old.clone().update(new_pushed); + assert_eq!(updated_pushed.nakamoto_blocks.len(), 0); + assert_eq!(updated_pushed.pushed_nakamoto_blocks.len(), 1); + assert_eq!( + updated_pushed + .pushed_nakamoto_blocks + .get(&nk2) + .unwrap() + .len(), + 1 + ); + assert_eq!( + updated_pushed.pushed_nakamoto_blocks.get(&nk2).unwrap()[0] + .1 + .blocks + .len(), + 1 + ); + assert_eq!( + updated_pushed.pushed_nakamoto_blocks.get(&nk2).unwrap()[0] + .1 + .blocks[0], + nblk1 + ); + assert_eq!(updated_pushed.uploaded_nakamoto_blocks.len(), 0); + + debug!("===="); + let updated_uploaded = old.clone().update(new_uploaded); + assert_eq!(updated_uploaded.nakamoto_blocks.len(), 0); + assert_eq!(updated_uploaded.pushed_nakamoto_blocks.len(), 0); + assert_eq!(updated_uploaded.uploaded_nakamoto_blocks.len(), 1); + assert_eq!(updated_uploaded.uploaded_nakamoto_blocks[0], nblk1); +} diff --git a/stackslib/src/net/tests/neighbors.rs b/stackslib/src/net/tests/neighbors.rs index 03b1224312..6a1ef7a4e9 100644 --- a/stackslib/src/net/tests/neighbors.rs +++ b/stackslib/src/net/tests/neighbors.rs @@ -21,7 +21,7 @@ use stacks_common::util::hash::*; use stacks_common::util::sleep_ms; use crate::core::{ - StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, + EpochList, StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_TESTNET, STACKS_EPOCH_MAX, }; use crate::net::asn::*; @@ -350,23 +350,23 @@ fn test_step_walk_1_neighbor_bad_epoch() { // peer 1 thinks its always epoch 2.0 peer_1_config.peer_version = 0x18000000; - peer_1_config.epochs = Some(vec![StacksEpoch { + peer_1_config.epochs = Some(EpochList::new(&[StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, end_height: STACKS_EPOCH_MAX, block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_0, - }]); + }])); // peer 2 thinks its always epoch 2.05 peer_2_config.peer_version = 0x18000005; - peer_2_config.epochs = Some(vec![StacksEpoch { + peer_2_config.epochs = Some(EpochList::new(&[StacksEpoch { epoch_id: StacksEpochId::Epoch2_05, start_height: 0, end_height: STACKS_EPOCH_MAX, block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_05, - }]); + }])); let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index 
23d1dd60a8..f4fc8d9eb8 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -2759,7 +2759,7 @@ fn process_new_blocks_rejects_problematic_asts() { let mut peer_config = TestPeerConfig::new(function_name!(), 32019, 32020); peer_config.initial_balances = initial_balances; - peer_config.epochs = Some(vec![ + peer_config.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -2774,7 +2774,7 @@ fn process_new_blocks_rejects_problematic_asts() { block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_05, }, - ]); + ])); let burnchain = peer_config.burnchain.clone(); // activate new AST rules right away @@ -3097,6 +3097,8 @@ fn process_new_blocks_rejects_problematic_asts() { 0, 0, 0, + 0, + 0, ConsensusHash([0x01; 20]), HashMap::new(), ); @@ -3164,7 +3166,7 @@ fn process_new_blocks_rejects_problematic_asts() { #[test] fn test_block_pay_to_contract_gated_at_v210() { let mut peer_config = TestPeerConfig::new(function_name!(), 4246, 4247); - let epochs = vec![ + let epochs = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -3193,7 +3195,7 @@ fn test_block_pay_to_contract_gated_at_v210() { block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]; + ]); peer_config.epochs = Some(epochs); let burnchain = peer_config.burnchain.clone(); @@ -3338,7 +3340,7 @@ fn test_block_versioned_smart_contract_gated_at_v210() { 1000000, )]; - let epochs = vec![ + let epochs = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -3367,7 +3369,7 @@ fn test_block_versioned_smart_contract_gated_at_v210() { block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]; + ]); peer_config.epochs = Some(epochs); peer_config.initial_balances = initial_balances; @@ -3518,7 +3520,7 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { 1000000, )]; - let epochs = vec![ + let epochs = EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -3547,7 +3549,7 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]; + ]); peer_config.epochs = Some(epochs); peer_config.initial_balances = initial_balances; diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index d10a6ee368..231e0a91af 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -1221,13 +1221,14 @@ impl PeerNetwork { ) { // unable to store this due to quota being exceeded + debug!("{:?}: drop message due to quota being exceeded: {:?}", self.get_local_peer(), &message.payload.get_message_description()); return false; } if !buffer { debug!( "{:?}: Re-try handling buffered sortition-bound message {} from {:?}", - &self.get_local_peer(), + self.get_local_peer(), &message.payload.get_message_description(), &neighbor_key ); diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 958820b491..0c68d22ee7 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -32,6 +32,7 @@ rusqlite = { workspace = true } async-h1 = { version = "2.3.2", optional = true } async-std = { version = "1.6", optional = true, features = ["attributes"] } http-types = { version = "2.12", optional = true } +thiserror = { workspace = true } [target.'cfg(not(any(target_os = "macos", target_os="windows",
target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 82282926d3..727483886e 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -48,7 +48,7 @@ use stacks::chainstate::burn::Opcodes; use stacks::chainstate::coordinator::comm::CoordinatorChannels; #[cfg(test)] use stacks::chainstate::stacks::address::PoxAddress; -use stacks::core::{StacksEpoch, StacksEpochId}; +use stacks::core::{EpochList, StacksEpochId}; use stacks::monitoring::{increment_btc_blocks_received_counter, increment_btc_ops_sent_counter}; use stacks::net::http::{HttpRequestContents, HttpResponsePayload}; use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; @@ -135,16 +135,16 @@ pub fn addr2str(btc_addr: &BitcoinAddress) -> String { if let BitcoinAddress::Segwit(segwit_addr) = btc_addr { // regtest segwit addresses use a different hrp let s = segwit_addr.to_bech32_hrp("bcrt"); - warn!("Re-encoding {} to {}", &segwit_addr, &s); + warn!("Re-encoding {segwit_addr} to {s}"); s } else { - format!("{}", &btc_addr) + format!("{btc_addr}") } } #[cfg(not(test))] pub fn addr2str(btc_addr: &BitcoinAddress) -> String { - format!("{}", &btc_addr) + format!("{btc_addr}") } // TODO: add tests from mutation testing results #4862 @@ -186,12 +186,11 @@ pub fn make_bitcoin_indexer( let (_, network_type) = config.burnchain.get_bitcoin_network(); let indexer_runtime = BitcoinIndexerRuntime::new(network_type); - let burnchain_indexer = BitcoinIndexer { + BitcoinIndexer { config: indexer_config, runtime: indexer_runtime, - should_keep_running: should_keep_running, - }; - burnchain_indexer + should_keep_running, + } } pub fn get_satoshis_per_byte(config: &Config) -> u64 { @@ -215,7 +214,7 @@ impl LeaderBlockCommitFees { let mut fees = LeaderBlockCommitFees::estimated_fees_from_payload(payload, config); fees.spent_in_attempts = cmp::max(1, self.spent_in_attempts); fees.final_size = self.final_size; - fees.fee_rate = self.fee_rate + get_rbf_fee_increment(&config); + fees.fee_rate = self.fee_rate + get_rbf_fee_increment(config); fees.is_rbf_enabled = true; fees } @@ -306,8 +305,7 @@ impl BitcoinRegtestController { burnchain: Option<Burnchain>, should_keep_running: Option<Arc<AtomicBool>>, ) -> Self { - std::fs::create_dir_all(&config.get_burnchain_path_str()) - .expect("Unable to create workdir"); + std::fs::create_dir_all(config.get_burnchain_path_str()).expect("Unable to create workdir"); let (_, network_id) = config.burnchain.get_bitcoin_network(); let res = SpvClient::new( @@ -319,15 +317,15 @@ impl BitcoinRegtestController { false, ); if let Err(err) = res { - error!("Unable to init block headers: {}", err); + error!("Unable to init block headers: {err}"); panic!() } let burnchain_params = burnchain_params_from_config(&config.burnchain); if network_id == BitcoinNetworkType::Mainnet && config.burnchain.epochs.is_some() { - panic!("It is an error to set custom epochs while running on Mainnet: network_id {:?} config.burnchain {:#?}", - &network_id, &config.burnchain); + panic!("It is an error to set custom epochs while running on Mainnet: network_id {network_id:?} config.burnchain {:#?}", + &config.burnchain); } let indexer_config = { @@ -434,11 +432,10 @@ impl BitcoinRegtestController { /// Get the default Burnchain instance from our config fn default_burnchain(&self) -> Burnchain { - let
burnchain = match &self.burnchain_config { + match &self.burnchain_config { Some(burnchain) => burnchain.clone(), None => self.config.get_burnchain(), - }; - burnchain + } } /// Get the PoX constants in use @@ -465,7 +462,7 @@ impl BitcoinRegtestController { } Err(e) => { // keep trying - error!("Unable to sync with burnchain: {}", e); + error!("Unable to sync with burnchain: {e}"); match e { burnchain_error::TrySyncAgain => { // try again immediately @@ -491,7 +488,7 @@ impl BitcoinRegtestController { (None, Some(chain_tip)) => chain_tip.clone(), (Some(state_transition), _) => { let burnchain_tip = BurnchainTip { - block_snapshot: block_snapshot, + block_snapshot, state_transition: BurnchainStateTransitionOps::from(state_transition), received_at: Instant::now(), }; @@ -501,7 +498,7 @@ impl BitcoinRegtestController { (None, None) => { // can happen at genesis let burnchain_tip = BurnchainTip { - block_snapshot: block_snapshot, + block_snapshot, state_transition: BurnchainStateTransitionOps::noop(), received_at: Instant::now(), }; @@ -576,7 +573,7 @@ impl BitcoinRegtestController { } Err(e) => { // keep trying - error!("Unable to sync with burnchain: {}", e); + error!("Unable to sync with burnchain: {e}"); match e { burnchain_error::CoordinatorClosed => { return Err(BurnchainControllerError::CoordinatorClosed) @@ -602,8 +599,8 @@ impl BitcoinRegtestController { }; let burnchain_tip = BurnchainTip { - block_snapshot: block_snapshot, - state_transition: state_transition, + block_snapshot, + state_transition, received_at: Instant::now(), }; @@ -641,11 +638,11 @@ impl BitcoinRegtestController { let filter_addresses = vec![addr2str(&address)]; let pubk = if self.config.miner.segwit { - let mut p = public_key.clone(); + let mut p = *public_key; p.set_compressed(true); p } else { - public_key.clone() + *public_key }; test_debug!("Import public key '{}'", &pubk.to_hex()); @@ -685,7 +682,7 @@ impl BitcoinRegtestController { let parsed_utxo: ParsedUTXO = match serde_json::from_value(entry) { Ok(utxo) => utxo, Err(err) => { - warn!("Failed parsing UTXO: {}", err); + warn!("Failed parsing UTXO: {err}"); continue; } }; @@ -753,11 +750,11 @@ impl BitcoinRegtestController { } let pubk = if self.config.miner.segwit && epoch_id >= StacksEpochId::Epoch21 { - let mut p = public_key.clone(); + let mut p = *public_key; p.set_compressed(true); p } else { - public_key.clone() + *public_key }; // Configure UTXO filter @@ -786,7 +783,7 @@ impl BitcoinRegtestController { break utxos; } Err(e) => { - error!("Bitcoin RPC failure: error listing utxos {:?}", e); + error!("Bitcoin RPC failure: error listing utxos {e:?}"); sleep_ms(5000); continue; } @@ -817,13 +814,13 @@ impl BitcoinRegtestController { utxos = match result { Ok(utxos) => utxos, Err(e) => { - error!("Bitcoin RPC failure: error listing utxos {:?}", e); + error!("Bitcoin RPC failure: error listing utxos {e:?}"); sleep_ms(5000); continue; } }; - test_debug!("Unspent for {:?}: {:?}", &filter_addresses, &utxos); + test_debug!("Unspent for {filter_addresses:?}: {utxos:?}"); if utxos.is_empty() { return None; @@ -832,20 +829,14 @@ impl BitcoinRegtestController { } } } else { - debug!( - "Got {} UTXOs for {:?}", - utxos.utxos.len(), - &filter_addresses - ); + debug!("Got {} UTXOs for {filter_addresses:?}", utxos.utxos.len(),); utxos }; let total_unspent = utxos.total_available(); if total_unspent < total_required { warn!( - "Total unspent {} < {} for {:?}", - total_unspent, - total_required, + "Total unspent {total_unspent} < {total_required} for {:?}", 
&pubk.to_hex() ); return None; @@ -1013,7 +1004,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1026,10 +1017,8 @@ impl BitcoinRegtestController { }; tx.output = vec![consensus_output]; - tx.output.push( - PoxAddress::Standard(payload.recipient.clone(), None) - .to_bitcoin_tx_out(DUST_UTXO_LIMIT), - ); + tx.output + .push(PoxAddress::Standard(payload.recipient, None).to_bitcoin_tx_out(DUST_UTXO_LIMIT)); self.finalize_tx( epoch_id, @@ -1099,7 +1088,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1113,8 +1102,7 @@ impl BitcoinRegtestController { tx.output = vec![consensus_output]; tx.output.push( - PoxAddress::Standard(payload.delegate_to.clone(), None) - .to_bitcoin_tx_out(DUST_UTXO_LIMIT), + PoxAddress::Standard(payload.delegate_to, None).to_bitcoin_tx_out(DUST_UTXO_LIMIT), ); self.finalize_tx( @@ -1180,7 +1168,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1271,7 +1259,7 @@ impl BitcoinRegtestController { tx.output = vec![consensus_output]; tx.output - .push(PoxAddress::Standard(payload.output.clone(), None).to_bitcoin_tx_out(output_amt)); + .push(PoxAddress::Standard(payload.output, None).to_bitcoin_tx_out(output_amt)); self.finalize_tx( epoch_id, @@ -1347,7 +1335,7 @@ impl BitcoinRegtestController { let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); payload .consensus_serialize(&mut bytes) - .map_err(|e| BurnchainControllerError::SerializerError(e))?; + .map_err(BurnchainControllerError::SerializerError)?; bytes }; @@ -1388,10 +1376,9 @@ impl BitcoinRegtestController { fn magic_bytes(&self) -> Vec { #[cfg(test)] { - if let Some(set_bytes) = TEST_MAGIC_BYTES + if let Some(set_bytes) = *TEST_MAGIC_BYTES .lock() .expect("FATAL: test magic bytes mutex poisoned") - .clone() { return set_bytes.to_vec(); } @@ -1399,6 +1386,7 @@ impl BitcoinRegtestController { self.config.burnchain.magic_bytes.as_bytes().to_vec() } + #[allow(clippy::too_many_arguments)] fn send_block_commit_operation( &mut self, epoch_id: StacksEpochId, @@ -1407,7 +1395,7 @@ impl BitcoinRegtestController { utxos_to_include: Option, utxos_to_exclude: Option, previous_fees: Option, - previous_txids: &Vec, + previous_txids: &[Txid], ) -> Result { let _ = self.sortdb_mut(); let burn_chain_tip = self @@ -1433,6 +1421,7 @@ impl BitcoinRegtestController { ) } + #[allow(clippy::too_many_arguments)] fn send_block_commit_operation_at_burnchain_height( &mut self, epoch_id: StacksEpochId, @@ -1441,7 +1430,7 @@ impl BitcoinRegtestController { utxos_to_include: Option, utxos_to_exclude: Option, mut estimated_fees: LeaderBlockCommitFees, - previous_txids: &Vec, + previous_txids: &[Txid], burnchain_block_height: u64, ) -> Result { let public_key = signer.get_public_key(); @@ -1500,10 +1489,10 @@ impl BitcoinRegtestController { let mut txid = tx.txid().as_bytes().to_vec(); txid.reverse(); - debug!("Transaction relying on UTXOs: {:?}", 
utxos); + debug!("Transaction relying on UTXOs: {utxos:?}"); let txid = Txid::from_bytes(&txid[..]).unwrap(); - let mut txids = previous_txids.clone(); - txids.push(txid.clone()); + let mut txids = previous_txids.to_vec(); + txids.push(txid); let ongoing_block_commit = OngoingBlockCommit { payload, utxos, @@ -1512,12 +1501,11 @@ impl BitcoinRegtestController { }; info!( - "Miner node: submitting leader_block_commit (txid: {}, rbf: {}, total spent: {}, size: {}, fee_rate: {})", + "Miner node: submitting leader_block_commit (txid: {}, rbf: {}, total spent: {}, size: {}, fee_rate: {fee_rate})", txid.to_hex(), ongoing_block_commit.fees.is_rbf_enabled, ongoing_block_commit.fees.total_spent(), - ongoing_block_commit.fees.final_size, - fee_rate, + ongoing_block_commit.fees.final_size ); self.ongoing_block_commit = Some(ongoing_block_commit); @@ -1537,15 +1525,8 @@ impl BitcoinRegtestController { // Are we currently tracking an operation? if self.ongoing_block_commit.is_none() || !self.allow_rbf { // Good to go, let's build the transaction and send it. - let res = self.send_block_commit_operation( - epoch_id, - payload, - signer, - None, - None, - None, - &vec![], - ); + let res = + self.send_block_commit_operation(epoch_id, payload, signer, None, None, None, &[]); return res; } @@ -1563,10 +1544,7 @@ impl BitcoinRegtestController { Ok(true) ); if ongoing_tx_confirmed { - debug!( - "Was able to retrieve confirmation of ongoing burnchain TXID - {}", - txid - ); + debug!("Was able to retrieve confirmation of ongoing burnchain TXID - {txid}"); let res = self.send_block_commit_operation( epoch_id, payload, @@ -1574,11 +1552,11 @@ impl BitcoinRegtestController { None, None, None, - &vec![], + &[], ); return res; } else { - debug!("Was unable to retrieve ongoing TXID - {}", txid); + debug!("Was unable to retrieve ongoing TXID - {txid}"); }; } @@ -1589,13 +1567,13 @@ impl BitcoinRegtestController { .map_err(|_| BurnchainControllerError::BurnchainError)?; let mut found_last_mined_at = false; while traversal_depth < UTXO_CACHE_STALENESS_LIMIT { - if &burn_chain_tip.block_hash == &ongoing_op.utxos.bhh { + if burn_chain_tip.block_hash == ongoing_op.utxos.bhh { found_last_mined_at = true; break; } let parent = BurnchainDB::get_burnchain_block( - &burnchain_db.conn(), + burnchain_db.conn(), &burn_chain_tip.parent_block_hash, ) .map_err(|_| BurnchainControllerError::BurnchainError)?; @@ -1609,15 +1587,8 @@ impl BitcoinRegtestController { "Possible presence of fork or stale UTXO cache, invalidating cached set of UTXOs."; "cached_burn_block_hash" => %ongoing_op.utxos.bhh, ); - let res = self.send_block_commit_operation( - epoch_id, - payload, - signer, - None, - None, - None, - &vec![], - ); + let res = + self.send_block_commit_operation(epoch_id, payload, signer, None, None, None, &[]); return res; } @@ -1659,7 +1630,7 @@ impl BitcoinRegtestController { None, Some(ongoing_op.utxos.clone()), None, - &vec![], + &[], ) } else { // Case 2) ii): Attempt to RBF @@ -1724,9 +1695,9 @@ impl BitcoinRegtestController { } else { // Fetch some UTXOs let addr = self.get_miner_address(epoch_id, public_key); - let utxos = match self.get_utxos( + match self.get_utxos( epoch_id, - &public_key, + public_key, total_required, utxos_to_exclude, block_height, @@ -1734,15 +1705,13 @@ impl BitcoinRegtestController { Some(utxos) => utxos, None => { warn!( - "No UTXOs for {} ({}) in epoch {}", + "No UTXOs for {} ({}) in epoch {epoch_id}", &public_key.to_hex(), - &addr2str(&addr), - epoch_id + &addr2str(&addr) ); return 
Err(BurnchainControllerError::NoUTXOs); } - }; - utxos + } }; // Prepare a backbone for the tx @@ -1756,6 +1725,7 @@ impl BitcoinRegtestController { Ok((transaction, utxos)) } + #[allow(clippy::too_many_arguments)] fn finalize_tx( &mut self, epoch_id: StacksEpochId, @@ -1854,18 +1824,14 @@ impl BitcoinRegtestController { } if total_consumed < total_target { - warn!( - "Consumed total {} is less than intended spend: {}", - total_consumed, total_target - ); + warn!("Consumed total {total_consumed} is less than intended spend: {total_target}"); return false; } // Append the change output let value = total_consumed - tx_cost; debug!( - "Payments value: {:?}, total_consumed: {:?}, total_spent: {:?}", - value, total_consumed, total_target + "Payments value: {value:?}, total_consumed: {total_consumed:?}, total_spent: {total_target:?}" ); if value >= DUST_UTXO_LIMIT { let change_output = if self.config.miner.segwit && epoch_id >= StacksEpochId::Epoch21 { @@ -1884,7 +1850,7 @@ impl BitcoinRegtestController { debug!("Not enough change to clear dust limit. Not adding change address."); } - for (_i, utxo) in utxos_set.utxos.iter().enumerate() { + for utxo in utxos_set.utxos.iter() { let input = TxIn { previous_output: OutPoint { txid: utxo.txid, @@ -1958,8 +1924,8 @@ impl BitcoinRegtestController { transaction.txid() }) .map_err(|e| { - error!("Bitcoin RPC error: transaction submission failed - {:?}", e); - BurnchainControllerError::TransactionSubmissionFailed(format!("{:?}", e)) + error!("Bitcoin RPC error: transaction submission failed - {e:?}"); + BurnchainControllerError::TransactionSubmissionFailed(format!("{e:?}")) }) } @@ -1977,8 +1943,8 @@ impl BitcoinRegtestController { if debug_ctr % 10 == 0 { debug!( - "Waiting until canonical sortition height reaches {} (currently {})", - height_to_wait, canonical_sortition_tip.block_height + "Waiting until canonical sortition height reaches {height_to_wait} (currently {})", + canonical_sortition_tip.block_height ); } debug_ctr += 1; @@ -2012,7 +1978,7 @@ impl BitcoinRegtestController { /// Instruct a regtest Bitcoin node to build the next block. 
pub fn build_next_block(&self, num_blocks: u64) { - debug!("Generate {} block(s)", num_blocks); + debug!("Generate {num_blocks} block(s)"); let public_key_bytes = match &self.config.burnchain.local_mining_public_key { Some(public_key) => hex_bytes(public_key).expect("Invalid byte sequence"), None => panic!("Unable to make new block, mining public key"), @@ -2028,7 +1994,7 @@ impl BitcoinRegtestController { match result { Ok(_) => {} Err(e) => { - error!("Bitcoin RPC failure: error generating block {:?}", e); + error!("Bitcoin RPC failure: error generating block {e:?}"); panic!(); } } @@ -2036,7 +2002,7 @@ impl BitcoinRegtestController { #[cfg(test)] pub fn invalidate_block(&self, block: &BurnchainHeaderHash) { - info!("Invalidating block {}", &block); + info!("Invalidating block {block}"); let request = BitcoinRPCRequest { method: "invalidateblock".into(), params: vec![json!(&block.to_string())], @@ -2044,7 +2010,7 @@ impl BitcoinRegtestController { jsonrpc: "2.0".into(), }; if let Err(e) = BitcoinRPCRequest::send(&self.config, request) { - error!("Bitcoin RPC failure: error invalidating block {:?}", e); + error!("Bitcoin RPC failure: error invalidating block {e:?}"); panic!(); } } @@ -2062,7 +2028,7 @@ impl BitcoinRegtestController { BurnchainHeaderHash::from_hex(v.get("result").unwrap().as_str().unwrap()).unwrap() } Err(e) => { - error!("Bitcoin RPC failure: error invalidating block {:?}", e); + error!("Bitcoin RPC failure: error invalidating block {e:?}"); panic!(); } } @@ -2118,7 +2084,7 @@ impl BitcoinRegtestController { } }; - transaction.map(|tx| SerializedTx::new(tx)) + transaction.map(SerializedTx::new) } #[cfg(test)] @@ -2139,7 +2105,7 @@ impl BitcoinRegtestController { for pk in pks { debug!("Import public key '{}'", &pk.to_hex()); - if let Err(e) = BitcoinRPCRequest::import_public_key(&self.config, &pk) { + if let Err(e) = BitcoinRPCRequest::import_public_key(&self.config, pk) { warn!("Error when importing pubkey: {e:?}"); } } @@ -2157,7 +2123,7 @@ impl BitcoinRegtestController { num_blocks.try_into().unwrap(), addr2str(&address), ) { - error!("Bitcoin RPC failure: error generating block {:?}", e); + error!("Bitcoin RPC failure: error generating block {e:?}"); panic!(); } return; @@ -2165,7 +2131,7 @@ impl BitcoinRegtestController { // otherwise, round robin generate blocks for i in 0..num_blocks { - let pk = &pks[usize::try_from(i % pks.len()).unwrap()]; + let pk = &pks[i % pks.len()]; let address = self.get_miner_address(StacksEpochId::Epoch21, pk); if i < pks.len() { debug!( @@ -2177,7 +2143,7 @@ impl BitcoinRegtestController { if let Err(e) = BitcoinRPCRequest::generate_to_address(&self.config, 1, addr2str(&address)) { - error!("Bitcoin RPC failure: error generating block {:?}", e); + error!("Bitcoin RPC failure: error generating block {e:?}"); panic!(); } } @@ -2240,7 +2206,7 @@ impl BurnchainController for BitcoinRegtestController { Ok(()) } - fn get_stacks_epochs(&self) -> Vec { + fn get_stacks_epochs(&self) -> EpochList { self.indexer.get_stacks_epochs() } @@ -2249,10 +2215,7 @@ impl BurnchainController for BitcoinRegtestController { target_block_height_opt: Option, ) -> Result<(BurnchainTip, u64), BurnchainControllerError> { // if no target block height is given, just fetch the first burnchain block. 
- self.receive_blocks( - false, - target_block_height_opt.map_or_else(|| Some(1), |x| Some(x)), - ) + self.receive_blocks(false, target_block_height_opt.map_or_else(|| Some(1), Some)) } fn sync( @@ -2271,10 +2234,7 @@ impl BurnchainController for BitcoinRegtestController { // Evaluate process_exit_at_block_height setting if let Some(cap) = self.config.burnchain.process_exit_at_block_height { if burnchain_tip.block_snapshot.block_height >= cap { - info!( - "Node succesfully reached the end of the ongoing {} blocks epoch!", - cap - ); + info!("Node successfully reached the end of the ongoing {cap} blocks epoch!"); info!("This process will automatically terminate in 30s, restart your node for participating in the next epoch."); sleep_ms(30000); std::process::exit(0); @@ -2351,13 +2311,12 @@ impl SerializedTx { } pub fn txid(&self) -> Txid { - self.txid.clone() + self.txid } pub fn to_hex(&self) -> String { - let formatted_bytes: Vec<String> = - self.bytes.iter().map(|b| format!("{:02x}", b)).collect(); - format!("{}", formatted_bytes.join("")) + let formatted_bytes: Vec<String> = self.bytes.iter().map(|b| format!("{b:02x}")).collect(); + formatted_bytes.join("").to_string() } } @@ -2389,7 +2348,7 @@ impl ParsedUTXO { Some(Sha256dHash::from(&txid[..])) } Err(err) => { - warn!("Unable to get txid from UTXO {}", err); + warn!("Unable to get txid from UTXO {err}"); None } } @@ -2418,8 +2377,8 @@ impl ParsedUTXO { Some(amount) } (lhs, rhs) => { - warn!("Error while converting BTC to sat {:?} - {:?}", lhs, rhs); - return None; + warn!("Error while converting BTC to sat {lhs:?} - {rhs:?}"); + None } } } @@ -2431,7 +2390,7 @@ impl ParsedUTXO { let base: u64 = 10; let int_part = amount / base.pow(8); let frac_part = amount % base.pow(8); - let amount = format!("{}.{:08}", int_part, frac_part); + let amount = format!("{int_part}.{frac_part:08}"); amount } @@ -2469,13 +2428,13 @@ type RPCResult<T> = Result<T, RPCError>; impl From<io::Error> for RPCError { fn from(ioe: io::Error) -> Self { - Self::Network(format!("IO Error: {:?}", &ioe)) + Self::Network(format!("IO Error: {ioe:?}")) } } impl From<NetError> for RPCError { fn from(ne: NetError) -> Self { - Self::Network(format!("Net Error: {:?}", &ne)) + Self::Network(format!("Net Error: {ne:?}")) } } @@ -2488,11 +2447,11 @@ impl BitcoinRPCRequest { _ => None, }; let url = config.burnchain.get_rpc_url(wallet_id); - Url::parse(&url).unwrap_or_else(|_| panic!("Unable to parse {} as a URL", url)) + Url::parse(&url).unwrap_or_else(|_| panic!("Unable to parse {url} as a URL")) }; debug!( - "BitcoinRPC builder '{}': {:?}:{:?}@{}", - &payload.method, &config.burnchain.username, &config.burnchain.password, &url + "BitcoinRPC builder '{}': {:?}:{:?}@{url}", + &payload.method, &config.burnchain.username, &config.burnchain.password ); let host = url @@ -2516,27 +2475,26 @@ impl BitcoinRPCRequest { .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); request.add_header("Connection".into(), "close".into()); - match (&config.burnchain.username, &config.burnchain.password) { - (Some(username), Some(password)) => { - let auth_token = format!("Basic {}", encode(format!("{}:{}", username, password))); - request.add_header("Authorization".into(), auth_token); - } - (_, _) => {} - }; + if let (Some(username), Some(password)) = + (&config.burnchain.username, &config.burnchain.password) + { + let auth_token = format!("Basic {}", encode(format!("{username}:{password}"))); + request.add_header("Authorization".into(), auth_token); + } request } #[cfg(test)] pub fn get_raw_transaction(config: 
&Config, txid: &Txid) -> RPCResult { - debug!("Get raw transaction {}", txid); + debug!("Get raw transaction {txid}"); let payload = BitcoinRPCRequest { method: "getrawtransaction".to_string(), - params: vec![format!("{}", txid).into()], + params: vec![format!("{txid}").into()], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), }; - let res = BitcoinRPCRequest::send(&config, payload)?; - debug!("Got raw transaction {}: {:?}", txid, &res); + let res = BitcoinRPCRequest::send(config, payload)?; + debug!("Got raw transaction {txid}: {res:?}"); Ok(res.get("result").unwrap().as_str().unwrap().to_string()) } @@ -2544,11 +2502,11 @@ impl BitcoinRPCRequest { pub fn check_transaction_confirmed(config: &Config, txid: &Txid) -> RPCResult { let payload = BitcoinRPCRequest { method: "gettransaction".to_string(), - params: vec![format!("{}", txid).into()], + params: vec![format!("{txid}").into()], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), }; - let res = BitcoinRPCRequest::send(&config, payload)?; + let res = BitcoinRPCRequest::send(config, payload)?; let confirmations = res .get("result") .ok_or_else(|| RPCError::Parsing("No 'result' field in bitcoind RPC response".into()))? @@ -2567,7 +2525,7 @@ impl BitcoinRPCRequest { } pub fn generate_to_address(config: &Config, num_blocks: u64, address: String) -> RPCResult<()> { - debug!("Generate {} blocks to {}", num_blocks, &address); + debug!("Generate {num_blocks} blocks to {address}"); let payload = BitcoinRPCRequest { method: "generatetoaddress".to_string(), params: vec![num_blocks.into(), address.clone().into()], @@ -2575,11 +2533,8 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let res = BitcoinRPCRequest::send(&config, payload)?; - debug!( - "Generated {} blocks to {}: {:?}", - num_blocks, &address, &res - ); + let res = BitcoinRPCRequest::send(config, payload)?; + debug!("Generated {num_blocks} blocks to {address}: {res:?}"); Ok(()) } @@ -2598,21 +2553,17 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let mut res = BitcoinRPCRequest::send(&config, payload)?; - let bhh = match res.as_object_mut() { - Some(res) => { - let res = res - .get("result") - .ok_or(RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - let bhh: String = serde_json::from_value(res.to_owned()) - .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - let bhh = BurnchainHeaderHash::from_hex(&bhh) - .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - bhh - } - _ => return Err(RPCError::Parsing("Failed to get UTXOs".to_string())), + let mut res = BitcoinRPCRequest::send(config, payload)?; + let Some(res) = res.as_object_mut() else { + return Err(RPCError::Parsing("Failed to get UTXOs".to_string())); }; - + let res = res + .get("result") + .ok_or(RPCError::Parsing("Failed to get bestblockhash".to_string()))?; + let bhh_string: String = serde_json::from_value(res.to_owned()) + .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; + let bhh = BurnchainHeaderHash::from_hex(&bhh_string) + .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; let min_conf = 0i64; let max_conf = 9999999i64; let minimum_amount = ParsedUTXO::sat_to_serialized_btc(minimum_sum_amount); @@ -2630,7 +2581,7 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let mut res = BitcoinRPCRequest::send(&config, payload)?; + let mut res = BitcoinRPCRequest::send(config, payload)?; let txids_to_filter = if let Some(utxos_to_exclude) = utxos_to_exclude { 
utxos_to_exclude .utxos @@ -2650,7 +2601,7 @@ impl BitcoinRPCRequest { let parsed_utxo: ParsedUTXO = match serde_json::from_value(entry) { Ok(utxo) => utxo, Err(err) => { - warn!("Failed parsing UTXO: {}", err); + warn!("Failed parsing UTXO: {err}"); continue; } }; @@ -2710,11 +2661,11 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let json_resp = BitcoinRPCRequest::send(&config, payload)?; + let json_resp = BitcoinRPCRequest::send(config, payload)?; if let Some(e) = json_resp.get("error") { if !e.is_null() { - error!("Error submitting transaction: {}", json_resp); + error!("Error submitting transaction: {json_resp}"); return Err(RPCError::Bitcoind(json_resp.to_string())); } } @@ -2756,9 +2707,9 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let result = BitcoinRPCRequest::send(&config, payload)?; + let result = BitcoinRPCRequest::send(config, payload)?; let checksum = result - .get(&"result".to_string()) + .get("result") .and_then(|res| res.as_object()) .and_then(|obj| obj.get("checksum")) .and_then(|checksum_val| checksum_val.as_str()) @@ -2770,13 +2721,13 @@ impl BitcoinRPCRequest { let payload = BitcoinRPCRequest { method: "importdescriptors".to_string(), params: vec![ - json!([{ "desc": format!("addr({})#{}", &addr2str(&address), &checksum), "timestamp": 0, "internal": true }]), + json!([{ "desc": format!("addr({})#{checksum}", &addr2str(&address)), "timestamp": 0, "internal": true }]), ], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), }; - BitcoinRPCRequest::send(&config, payload)?; + BitcoinRPCRequest::send(config, payload)?; } Ok(()) } @@ -2790,7 +2741,7 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - let mut res = BitcoinRPCRequest::send(&config, payload)?; + let mut res = BitcoinRPCRequest::send(config, payload)?; let mut wallets = Vec::new(); match res.as_object_mut() { Some(ref mut object) => match object.get_mut("result") { @@ -2799,7 +2750,7 @@ impl BitcoinRPCRequest { let parsed_wallet_name: String = match serde_json::from_value(entry) { Ok(wallet_name) => wallet_name, Err(err) => { - warn!("Failed parsing wallet name: {}", err); + warn!("Failed parsing wallet name: {err}"); continue; } }; @@ -2828,12 +2779,12 @@ impl BitcoinRPCRequest { jsonrpc: "2.0".to_string(), }; - BitcoinRPCRequest::send(&config, payload)?; + BitcoinRPCRequest::send(config, payload)?; Ok(()) } pub fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { - let request = BitcoinRPCRequest::build_rpc_request(&config, &payload); + let request = BitcoinRPCRequest::build_rpc_request(config, &payload); let timeout = Duration::from_secs(u64::from(config.burnchain.timeout)); let host = request.preamble().host.hostname(); @@ -2841,9 +2792,9 @@ impl BitcoinRPCRequest { let response = send_http_request(&host, port, request, timeout)?; if let HttpResponsePayload::JSON(js) = response.destruct().1 { - return Ok(js); + Ok(js) } else { - return Err(RPCError::Parsing("Did not get a JSON response".into())); + Err(RPCError::Parsing("Did not get a JSON response".into())) } } } @@ -3025,12 +2976,12 @@ mod tests { Some(utxo_set), None, leader_fees, - &vec![], + &[], 2212, ) .unwrap(); - debug!("send_block_commit_operation:\n{:#?}", &block_commit); + debug!("send_block_commit_operation:\n{block_commit:#?}"); debug!("{}", &SerializedTx::new(block_commit.clone()).to_hex()); assert_eq!(block_commit.output[3].value, 323507); diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index 
d518f5bdea..15adebef95 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -14,7 +14,7 @@ use stacks::chainstate::burn::operations::{ }; use stacks::chainstate::burn::BlockSnapshot; use stacks::core::{ - StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, + EpochList, StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, STACKS_EPOCH_MAX, }; use stacks_common::types::chainstate::{BurnchainHeaderHash, PoxId}; @@ -44,8 +44,8 @@ impl MocknetController { let burnchain = config.get_burnchain(); Self { - config: config, - burnchain: burnchain, + config, + burnchain, db: None, queued_operations: VecDeque::new(), chain_tip: None, @@ -54,7 +54,7 @@ impl MocknetController { fn build_next_block_header(current_block: &BlockSnapshot) -> BurnchainBlockHeader { let curr_hash = &current_block.burn_header_hash.to_bytes()[..]; - let next_hash = Sha256Sum::from_data(&curr_hash); + let next_hash = Sha256Sum::from_data(curr_hash); let block = BurnchainBlock::Bitcoin(BitcoinBlock::new( current_block.block_height + 1, @@ -99,10 +99,10 @@ impl BurnchainController for MocknetController { } } - fn get_stacks_epochs(&self) -> Vec<StacksEpoch> { + fn get_stacks_epochs(&self) -> EpochList { match &self.config.burnchain.epochs { Some(epochs) => epochs.clone(), - None => vec![ + None => EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -124,7 +124,7 @@ block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_1, }, - ], + ]), } } diff --git a/testnet/stacks-node/src/burnchains/mod.rs b/testnet/stacks-node/src/burnchains/mod.rs index 0c9446304d..dbf1533e73 100644 --- a/testnet/stacks-node/src/burnchains/mod.rs +++ b/testnet/stacks-node/src/burnchains/mod.rs @@ -1,7 +1,6 @@ pub mod bitcoin_regtest_controller; pub mod mocknet_controller; -use std::fmt; use std::time::Instant; use stacks::burnchains; @@ -9,48 +8,33 @@ use stacks::burnchains::{BurnchainStateTransitionOps, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::BlockstackOperationType; use stacks::chainstate::burn::BlockSnapshot; -use stacks::core::{StacksEpoch, StacksEpochId}; +use stacks::core::{EpochList, StacksEpochId}; use stacks_common::codec::Error as CodecError; pub use self::bitcoin_regtest_controller::{make_bitcoin_indexer, BitcoinRegtestController}; pub use self::mocknet_controller::MocknetController; use super::operations::BurnchainOpSigner; -#[derive(Debug)] +#[derive(Debug, thiserror::Error)] pub enum Error { + #[error("ChainsCoordinator closed")] CoordinatorClosed, - IndexerError(burnchains::Error), + #[error("Indexer error: {0}")] + IndexerError(#[from] burnchains::Error), + #[error("Burnchain error")] BurnchainError, + #[error("Max fee rate exceeded")] MaxFeeRateExceeded, + #[error("Identical operation, not submitting")] IdenticalOperation, + #[error("No UTXOs available")] NoUTXOs, + #[error("Transaction submission failed: {0}")] TransactionSubmissionFailed(String), + #[error("Serializer error: {0}")] SerializerError(CodecError), } -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Error::CoordinatorClosed => write!(f, "ChainsCoordinator closed"), - Error::IndexerError(ref e) => write!(f, "Indexer error: {:?}", e), - Error::BurnchainError => write!(f, "Burnchain error"), - Error::MaxFeeRateExceeded => 
write!(f, "Max fee rate exceeded"), - Error::IdenticalOperation => write!(f, "Identical operation, not submitting"), - Error::NoUTXOs => write!(f, "No UTXOs available"), - Error::TransactionSubmissionFailed(e) => { - write!(f, "Transaction submission failed: {e}") - } - Error::SerializerError(e) => write!(f, "Serializer error: {e}"), - } - } -} - -impl From for Error { - fn from(e: burnchains::Error) -> Self { - Error::IndexerError(e) - } -} - pub trait BurnchainController { fn start(&mut self, target_block_height_opt: Option) -> Result<(BurnchainTip, u64), Error>; @@ -69,7 +53,7 @@ pub trait BurnchainController { /// Invoke connect() on underlying burnchain and sortition databases, to perform any migration /// or instantiation before other callers may use open() fn connect_dbs(&mut self) -> Result<(), Error>; - fn get_stacks_epochs(&self) -> Vec; + fn get_stacks_epochs(&self) -> EpochList; #[cfg(test)] fn bootstrap_chain(&mut self, blocks_count: u64); diff --git a/testnet/stacks-node/src/chain_data.rs b/testnet/stacks-node/src/chain_data.rs index b1e32c15ea..cc60f964a3 100644 --- a/testnet/stacks-node/src/chain_data.rs +++ b/testnet/stacks-node/src/chain_data.rs @@ -81,7 +81,7 @@ impl MinerStats { { commits_at_sortition.push(missed); } else { - missed_commits_map.insert(missed.intended_sortition.clone(), vec![missed]); + missed_commits_map.insert(missed.intended_sortition, vec![missed]); } } @@ -106,8 +106,7 @@ impl MinerStats { &sortition_id, )?; if let Some(missed_commit_in_block) = missed_commits_map.remove(&sortition_id) { - missed_commits_at_height - .extend(missed_commit_in_block.into_iter().map(|x| x.clone())); + missed_commits_at_height.extend(missed_commit_in_block.into_iter().cloned()); } windowed_missed_commits.push(missed_commits_at_height); @@ -115,8 +114,7 @@ impl MinerStats { } else { // PoX reward-phase is not active debug!( - "Block {} is in a prepare phase or post-PoX sunset, so no windowing will take place", - burn_block_height; + "Block {burn_block_height} is in a prepare phase or post-PoX sunset, so no windowing will take place" ); assert_eq!(windowed_block_commits.len(), 1); @@ -197,19 +195,19 @@ impl MinerStats { .stderr(Stdio::piped()) .args(args); - debug!("Run: `{:?}`", &cmd); + debug!("Run: `{cmd:?}`"); let output = cmd .spawn() - .map_err(|e| format!("Failed to run `{}`: {:?}", &full_args, &e))? + .map_err(|e| format!("Failed to run `{full_args}`: {e:?}"))? .wait_with_output() - .map_err(|ioe| format!("Failed to run `{}`: {:?}", &full_args, &ioe))?; + .map_err(|ioe| format!("Failed to run `{full_args}`: {ioe:?}"))?; let exit_code = match output.status.code() { Some(code) => code, None => { // failed due to signal - return Err(format!("Failed to run `{}`: killed by signal", &full_args)); + return Err(format!("Failed to run `{full_args}`: killed by signal")); } }; @@ -223,11 +221,11 @@ impl MinerStats { all_miners: &[&str], ) -> Result, String> { let (exit_code, stdout, _stderr) = - Self::run_subprocess(&self.unconfirmed_commits_helper, &all_miners)?; + Self::run_subprocess(&self.unconfirmed_commits_helper, all_miners)?; if exit_code != 0 { return Err(format!( - "Failed to run `{}`: exit code {}", - &self.unconfirmed_commits_helper, exit_code + "Failed to run `{}`: exit code {exit_code}", + &self.unconfirmed_commits_helper )); } @@ -235,9 +233,8 @@ impl MinerStats { let unconfirmed_commits: Vec = serde_json::from_slice(&stdout) .map_err(|e| { format!( - "Failed to decode output from `{}`: {:?}. Output was `{}`", + "Failed to decode output from `{}`: {e:?}. 
Output was `{}`", &self.unconfirmed_commits_helper, - &e, String::from_utf8_lossy(&stdout) ) })?; @@ -255,22 +252,21 @@ impl MinerStats { }; let mut decoded_pox_addrs = vec![]; for pox_addr_hex in unconfirmed_commit.pox_addrs.iter() { - let Ok(pox_addr_bytes) = hex_bytes(&pox_addr_hex) else { - return Err(format!("Not a hex string: `{}`", &pox_addr_hex)); + let Ok(pox_addr_bytes) = hex_bytes(pox_addr_hex) else { + return Err(format!("Not a hex string: `{pox_addr_hex}`")); }; let Some(bitcoin_addr) = BitcoinAddress::from_scriptpubkey(BitcoinNetworkType::Mainnet, &pox_addr_bytes) else { return Err(format!( - "Not a recognized Bitcoin scriptpubkey: {}", - &pox_addr_hex + "Not a recognized Bitcoin scriptpubkey: {pox_addr_hex}" )); }; let Some(pox_addr) = PoxAddress::try_from_bitcoin_output(&BitcoinTxOutput { address: bitcoin_addr.clone(), units: 1, }) else { - return Err(format!("Not a recognized PoX address: {}", &bitcoin_addr)); + return Err(format!("Not a recognized PoX address: {bitcoin_addr}")); }; decoded_pox_addrs.push(pox_addr); } @@ -279,8 +275,8 @@ impl MinerStats { let mocked_commit = LeaderBlockCommitOp { treatment: vec![], sunset_burn: 0, - block_header_hash: BlockHeaderHash(DEADBEEF.clone()), - new_seed: VRFSeed(DEADBEEF.clone()), + block_header_hash: BlockHeaderHash(DEADBEEF), + new_seed: VRFSeed(DEADBEEF), parent_block_ptr: 1, parent_vtxindex: 1, key_block_ptr: 1, @@ -295,7 +291,7 @@ impl MinerStats { block_height: next_block_height, burn_parent_modulus: ((next_block_height.saturating_sub(1)) % BURN_BLOCK_MINED_AT_MODULUS) as u8, - burn_header_hash: BurnchainHeaderHash(DEADBEEF.clone()), + burn_header_hash: BurnchainHeaderHash(DEADBEEF), }; unconfirmed_spends.push(mocked_commit); @@ -306,7 +302,7 @@ impl MinerStats { /// Convert a list of burn sample points into a probability distribution by candidate's /// apparent sender (e.g. miner address). 
pub fn burn_dist_to_prob_dist(burn_dist: &[BurnSamplePoint]) -> HashMap { - if burn_dist.len() == 0 { + if burn_dist.is_empty() { return HashMap::new(); } if burn_dist.len() == 1 { @@ -343,13 +339,11 @@ impl MinerStats { if commit.commit_outs.len() != expected_pox_addrs.len() { return false; } - for i in 0..commit.commit_outs.len() { - if commit.commit_outs[i].to_burnchain_repr() - != expected_pox_addrs[i].to_burnchain_repr() - { + for (i, commit_out) in commit.commit_outs.iter().enumerate() { + if commit_out.to_burnchain_repr() != expected_pox_addrs[i].to_burnchain_repr() { info!( "Skipping invalid unconfirmed block-commit: {:?} != {:?}", - &commit.commit_outs[i].to_burnchain_repr(), + &commit_out.to_burnchain_repr(), expected_pox_addrs[i].to_burnchain_repr() ); return false; @@ -391,7 +385,7 @@ impl MinerStats { let (dist, total_spend) = Self::get_spend_distribution( active_miners_and_commits, unconfirmed_block_commits, - &expected_pox_addrs, + expected_pox_addrs, ); let mut probs = HashMap::new(); @@ -444,8 +438,8 @@ impl MinerStats { let mocked_commit = LeaderBlockCommitOp { treatment: vec![], sunset_burn: 0, - block_header_hash: BlockHeaderHash(DEADBEEF.clone()), - new_seed: VRFSeed(DEADBEEF.clone()), + block_header_hash: BlockHeaderHash(DEADBEEF), + new_seed: VRFSeed(DEADBEEF), parent_block_ptr: 2, parent_vtxindex: 2, key_block_ptr: 2, @@ -455,13 +449,13 @@ impl MinerStats { burn_fee: last_commit.burn_fee, input: (last_commit.txid, expected_input_index), apparent_sender: last_commit.apparent_sender.clone(), - txid: Txid(DEADBEEF.clone()), + txid: Txid(DEADBEEF), vtxindex: 1, block_height: next_block_height, burn_parent_modulus: ((next_block_height.saturating_sub(1)) % BURN_BLOCK_MINED_AT_MODULUS) as u8, - burn_header_hash: BurnchainHeaderHash(DEADBEEF.clone()), + burn_header_hash: BurnchainHeaderHash(DEADBEEF), }; commit_table.insert(miner.to_string(), mocked_commit); } @@ -473,13 +467,11 @@ impl MinerStats { if commit.commit_outs.len() != expected_pox_addrs.len() { return false; } - for i in 0..commit.commit_outs.len() { - if commit.commit_outs[i].to_burnchain_repr() - != expected_pox_addrs[i].to_burnchain_repr() - { + for (i, commit_out) in commit.commit_outs.iter().enumerate() { + if commit_out.to_burnchain_repr() != expected_pox_addrs[i].to_burnchain_repr() { info!( "Skipping invalid unconfirmed block-commit: {:?} != {:?}", - &commit.commit_outs[i].to_burnchain_repr(), + &commit_out.to_burnchain_repr(), expected_pox_addrs[i].to_burnchain_repr() ); return false; @@ -520,9 +512,7 @@ impl MinerStats { SortitionDB::get_block_commits_by_block(sortdb.conn(), &tip.sortition_id)?; for commit in commits.into_iter() { let miner = commit.apparent_sender.to_string(); - if miners.get(&miner).is_none() { - miners.insert(miner, commit); - } + miners.entry(miner).or_insert(commit); } tip = SortitionDB::get_block_snapshot(sortdb.conn(), &tip.parent_sortition_id)? 
.ok_or(DBError::NotFoundError)?; @@ -750,11 +740,11 @@ echo < { assert_eq!(spend, 2); @@ -1064,7 +1054,7 @@ EOF assert_eq!(spend, 10); } _ => { - panic!("unknown miner {}", &miner); + panic!("unknown miner {miner}"); } } } @@ -1082,7 +1072,7 @@ EOF ] { let prob = *win_probs .get(miner) - .unwrap_or_else(|| panic!("no probability for {}", &miner)); + .unwrap_or_else(|| panic!("no probability for {miner}")); match miner.as_str() { "miner-1" => { assert!((prob - (2.0 / 25.0)).abs() < 0.00001); @@ -1097,7 +1087,7 @@ EOF assert!((prob - (10.0 / 25.0)).abs() < 0.00001); } _ => { - panic!("unknown miner {}", &miner); + panic!("unknown miner {miner}"); } } } diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 0beed9471d..785ce057e5 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -52,6 +52,7 @@ use stacks::net::atlas::AtlasConfig; use stacks::net::connection::ConnectionOptions; use stacks::net::{Neighbor, NeighborKey}; use stacks::types::chainstate::BurnchainHeaderHash; +use stacks::types::EpochList; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::db::Error as DBError; use stacks_common::consts::SIGNER_SLOTS_PER_USER; @@ -86,7 +87,11 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; const INV_REWARD_CYCLES_TESTNET: u64 = 6; -const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1000; +const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1_000; +const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000; +const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000; +const DEFAULT_BLOCK_COMMIT_DELAY_MS: u64 = 20_000; +const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25; #[derive(Clone, Deserialize, Default, Debug)] #[serde(deny_unknown_fields)] @@ -106,15 +111,16 @@ pub struct ConfigFile { impl ConfigFile { pub fn from_path(path: &str) -> Result { - let content = fs::read_to_string(path).map_err(|e| format!("Invalid path: {}", &e))?; + let content = fs::read_to_string(path).map_err(|e| format!("Invalid path: {e}"))?; let mut f = Self::from_str(&content)?; f.__path = Some(path.to_string()); Ok(f) } + #[allow(clippy::should_implement_trait)] pub fn from_str(content: &str) -> Result { let mut config: ConfigFile = - toml::from_str(content).map_err(|e| format!("Invalid toml: {}", e))?; + toml::from_str(content).map_err(|e| format!("Invalid toml: {e}"))?; if let Some(mstx_balance) = config.mstx_balance.take() { warn!("'mstx_balance' in the config is deprecated; please use 'ustx_balance' instead."); match config.ustx_balance { @@ -365,7 +371,7 @@ impl Config { let Ok(config) = Config::from_config_file(config_file, false) else { return self.miner.clone(); }; - return config.miner; + config.miner } pub fn get_node_config(&self, resolve_bootstrap_nodes: bool) -> NodeConfig { @@ -378,7 +384,7 @@ impl Config { let Ok(config) = Config::from_config_file(config_file, resolve_bootstrap_nodes) else { return self.node.clone(); }; - return config.node; + config.node } /// Apply any test settings to this burnchain config struct @@ -390,26 +396,26 @@ impl Config { if let Some(first_burn_block_height) = self.burnchain.first_burn_block_height { debug!( - "Override first_block_height from {} to {}", - burnchain.first_block_height, first_burn_block_height + "Override first_block_height from {} to {first_burn_block_height}", + burnchain.first_block_height ); burnchain.first_block_height = first_burn_block_height; } if let 
Some(first_burn_block_timestamp) = self.burnchain.first_burn_block_timestamp { debug!( - "Override first_block_timestamp from {} to {}", - burnchain.first_block_timestamp, first_burn_block_timestamp + "Override first_block_timestamp from {} to {first_burn_block_timestamp}", + burnchain.first_block_timestamp ); burnchain.first_block_timestamp = first_burn_block_timestamp; } if let Some(first_burn_block_hash) = &self.burnchain.first_burn_block_hash { debug!( - "Override first_burn_block_hash from {} to {}", - burnchain.first_block_hash, first_burn_block_hash + "Override first_burn_block_hash from {} to {first_burn_block_hash}", + burnchain.first_block_hash ); - burnchain.first_block_hash = BurnchainHeaderHash::from_hex(&first_burn_block_hash) + burnchain.first_block_hash = BurnchainHeaderHash::from_hex(first_burn_block_hash) .expect("Invalid first_burn_block_hash"); } @@ -425,17 +431,14 @@ impl Config { if let Some(v1_unlock_height) = self.burnchain.pox_2_activation { debug!( - "Override v1_unlock_height from {} to {}", - burnchain.pox_constants.v1_unlock_height, v1_unlock_height + "Override v1_unlock_height from {} to {v1_unlock_height}", + burnchain.pox_constants.v1_unlock_height ); burnchain.pox_constants.v1_unlock_height = v1_unlock_height; } if let Some(epochs) = &self.burnchain.epochs { - if let Some(epoch) = epochs - .iter() - .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch10) - { + if let Some(epoch) = epochs.get(StacksEpochId::Epoch10) { // Epoch 1.0 start height can be equal to the first block height iff epoch 2.0 // start height is also equal to the first block height. assert!( @@ -444,20 +447,14 @@ impl Config { ); } - if let Some(epoch) = epochs - .iter() - .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch20) - { + if let Some(epoch) = epochs.get(StacksEpochId::Epoch20) { assert_eq!( epoch.start_height, burnchain.first_block_height, "FATAL: Epoch 2.0 start height must match the first block height" ); } - if let Some(epoch) = epochs - .iter() - .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch21) - { + if let Some(epoch) = epochs.get(StacksEpochId::Epoch21) { // Override v1_unlock_height to the start_height of epoch2.1 debug!( "Override v2_unlock_height from {} to {}", @@ -467,10 +464,7 @@ impl Config { burnchain.pox_constants.v1_unlock_height = epoch.start_height as u32 + 1; } - if let Some(epoch) = epochs - .iter() - .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch22) - { + if let Some(epoch) = epochs.get(StacksEpochId::Epoch22) { // Override v2_unlock_height to the start_height of epoch2.2 debug!( "Override v2_unlock_height from {} to {}", @@ -480,10 +474,7 @@ impl Config { burnchain.pox_constants.v2_unlock_height = epoch.start_height as u32 + 1; } - if let Some(epoch) = epochs - .iter() - .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch24) - { + if let Some(epoch) = epochs.get(StacksEpochId::Epoch24) { // Override pox_3_activation_height to the start_height of epoch2.4 debug!( "Override pox_3_activation_height from {} to {}", @@ -492,10 +483,7 @@ impl Config { burnchain.pox_constants.pox_3_activation_height = epoch.start_height as u32; } - if let Some(epoch) = epochs - .iter() - .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch25) - { + if let Some(epoch) = epochs.get(StacksEpochId::Epoch25) { // Override pox_4_activation_height to the start_height of epoch2.5 debug!( "Override pox_4_activation_height from {} to {}", @@ -508,22 +496,22 @@ impl Config { if let Some(sunset_start) = self.burnchain.sunset_start { debug!( - "Override sunset_start from {} to 
{}", - burnchain.pox_constants.sunset_start, sunset_start + "Override sunset_start from {} to {sunset_start}", + burnchain.pox_constants.sunset_start ); burnchain.pox_constants.sunset_start = sunset_start.into(); } if let Some(sunset_end) = self.burnchain.sunset_end { debug!( - "Override sunset_end from {} to {}", - burnchain.pox_constants.sunset_end, sunset_end + "Override sunset_end from {} to {sunset_end}", + burnchain.pox_constants.sunset_end ); burnchain.pox_constants.sunset_end = sunset_end.into(); } // check if the Epoch 3.0 burnchain settings as configured are going to be valid. - self.check_nakamoto_config(&burnchain); + self.check_nakamoto_config(burnchain); } fn check_nakamoto_config(&self, burnchain: &Burnchain) { @@ -531,9 +519,7 @@ impl Config { self.burnchain.get_bitcoin_network().1, self.burnchain.epochs.as_ref(), ); - let Some(epoch_30) = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30) - .map(|epoch_ix| epochs[epoch_ix].clone()) - else { + let Some(epoch_30) = epochs.get(StacksEpochId::Epoch30) else { // no Epoch 3.0, so just return return; }; @@ -592,7 +578,7 @@ impl Config { match Burnchain::new(&working_dir, &self.burnchain.chain, &network_name) { Ok(burnchain) => burnchain, Err(e) => { - error!("Failed to instantiate burnchain: {}", e); + error!("Failed to instantiate burnchain: {e}"); panic!() } } @@ -610,15 +596,14 @@ impl Config { let _ = StacksEpoch::validate_epochs(epochs); // sanity check: v1_unlock_height must happen after pox-2 instantiation - let epoch21_index = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch21) + let epoch21_index = StacksEpoch::find_epoch_by_id(epochs, StacksEpochId::Epoch21) .expect("FATAL: no epoch 2.1 defined"); - let epoch21 = &epochs[epoch21_index]; let v1_unlock_height = burnchain.pox_constants.v1_unlock_height as u64; assert!( v1_unlock_height > epoch21.start_height, - "FATAL: v1 unlock height occurs at or before pox-2 activation: {} <= {}\nburnchain: {:?}", v1_unlock_height, epoch21.start_height, burnchain + "FATAL: v1 unlock height occurs at or before pox-2 activation: {v1_unlock_height} <= {}\nburnchain: {burnchain:?}", epoch21.start_height ); let epoch21_rc = burnchain @@ -633,8 +618,7 @@ impl Config { // the reward cycle boundary. assert!( !burnchain.is_reward_cycle_start(v1_unlock_height), - "FATAL: v1 unlock height is at a reward cycle boundary\nburnchain: {:?}", - burnchain + "FATAL: v1 unlock height is at a reward cycle boundary\nburnchain: {burnchain:?}" ); } } @@ -646,7 +630,7 @@ impl Config { burn_mode: &str, bitcoin_network: BitcoinNetworkType, pox_2_activation: Option, - ) -> Result, String> { + ) -> Result, String> { let default_epochs = match bitcoin_network { BitcoinNetworkType::Mainnet => { Err("Cannot configure epochs in mainnet mode".to_string()) @@ -676,7 +660,7 @@ impl Config { } else if epoch_name == EPOCH_CONFIG_3_0_0 { Ok(StacksEpochId::Epoch30) } else { - Err(format!("Unknown epoch name specified: {}", epoch_name)) + Err(format!("Unknown epoch name specified: {epoch_name}")) }?; matched_epochs.push((epoch_id, configured_epoch.start_height)); } @@ -707,9 +691,7 @@ impl Config { .zip(matched_epochs.iter().map(|(epoch_id, _)| epoch_id)) { if expected_epoch != configured_epoch { - return Err(format!( - "Configured epochs may not skip an epoch. Expected epoch = {}, Found epoch = {}", - expected_epoch, configured_epoch)); + return Err(format!("Configured epochs may not skip an epoch. 
Expected epoch = {expected_epoch}, Found epoch = {configured_epoch}")); } } @@ -729,8 +711,8 @@ impl Config { for (i, (epoch_id, start_height)) in matched_epochs.iter().enumerate() { if epoch_id != &out_epochs[i].epoch_id { return Err( - format!("Unmatched epochs in configuration and node implementation. Implemented = {}, Configured = {}", - epoch_id, &out_epochs[i].epoch_id)); + format!("Unmatched epochs in configuration and node implementation. Implemented = {epoch_id}, Configured = {}", + &out_epochs[i].epoch_id)); } // end_height = next epoch's start height || i64::max if last epoch let end_height = if i + 1 < matched_epochs.len() { @@ -756,11 +738,11 @@ impl Config { .find(|&e| e.epoch_id == StacksEpochId::Epoch21) .ok_or("Cannot configure pox_2_activation if epoch 2.1 is not configured")?; if last_epoch.start_height > pox_2_activation as u64 { - Err(format!("Cannot configure pox_2_activation at a lower height than the Epoch 2.1 start height. pox_2_activation = {}, epoch 2.1 start height = {}", pox_2_activation, last_epoch.start_height))?; + Err(format!("Cannot configure pox_2_activation at a lower height than the Epoch 2.1 start height. pox_2_activation = {pox_2_activation}, epoch 2.1 start height = {}", last_epoch.start_height))?; } } - Ok(out_epochs) + Ok(EpochList::new(&out_epochs)) } pub fn from_config_file( @@ -808,7 +790,7 @@ impl Config { } if burnchain.mode == "helium" && burnchain.local_mining_public_key.is_none() { - return Err(format!("Config is missing the setting `burnchain.local_mining_public_key` (mandatory for helium)")); + return Err("Config is missing the setting `burnchain.local_mining_public_key` (mandatory for helium)".into()); } let is_mainnet = burnchain.mode == "mainnet"; @@ -832,27 +814,17 @@ impl Config { burnchain.peer_version, ); } - } else { - if is_mainnet && resolve_bootstrap_nodes { - let bootstrap_node = ConfigFile::mainnet().node.unwrap().bootstrap_node.unwrap(); - node.set_bootstrap_nodes( - bootstrap_node, - burnchain.chain_id, - burnchain.peer_version, - ); - } + } else if is_mainnet && resolve_bootstrap_nodes { + let bootstrap_node = ConfigFile::mainnet().node.unwrap().bootstrap_node.unwrap(); + node.set_bootstrap_nodes(bootstrap_node, burnchain.chain_id, burnchain.peer_version); } if let Some(deny_nodes) = deny_nodes { node.set_deny_nodes(deny_nodes, burnchain.chain_id, burnchain.peer_version); } // Validate the node config - if is_mainnet { - if node.use_test_genesis_chainstate == Some(true) { - return Err(format!( - "Attempted to run mainnet node with `use_test_genesis_chainstate`" - )); - } + if is_mainnet && node.use_test_genesis_chainstate == Some(true) { + return Err("Attempted to run mainnet node with `use_test_genesis_chainstate`".into()); } if node.stacker || node.miner { @@ -867,10 +839,10 @@ impl Config { let initial_balances: Vec = match config_file.ustx_balance { Some(balances) => { - if is_mainnet && balances.len() > 0 { - return Err(format!( - "Attempted to run mainnet node with specified `initial_balances`" - )); + if is_mainnet && !balances.is_empty() { + return Err( + "Attempted to run mainnet node with specified `initial_balances`".into(), + ); } balances .iter() @@ -911,16 +883,12 @@ impl Config { }; // check for observer config in env vars - match std::env::var("STACKS_EVENT_OBSERVER") { - Ok(val) => { - events_observers.insert(EventObserverConfig { - endpoint: val, - events_keys: vec![EventKeyType::AnyEvent], - timeout_ms: 1_000, - }); - () - } - _ => (), + if let Ok(val) = std::env::var("STACKS_EVENT_OBSERVER") { + 
events_observers.insert(EventObserverConfig { + endpoint: val, + events_keys: vec![EventKeyType::AnyEvent], + timeout_ms: 1_000, + }); }; let connection_options = match config_file.connection_options { @@ -1068,14 +1036,11 @@ impl Config { } pub fn is_mainnet(&self) -> bool { - match self.burnchain.mode.as_str() { - "mainnet" => true, - _ => false, - } + matches!(self.burnchain.mode.as_str(), "mainnet") } pub fn is_node_event_driven(&self) -> bool { - self.events_observers.len() > 0 + !self.events_observers.is_empty() } pub fn make_nakamoto_block_builder_settings( @@ -1092,6 +1057,8 @@ impl Config { candidate_retry_cache_size: miner_config.candidate_retry_cache_size, txs_to_consider: miner_config.txs_to_consider, filter_origins: miner_config.filter_origins, + tenure_cost_limit_per_block_percentage: miner_config + .tenure_cost_limit_per_block_percentage, }, miner_status, confirm_microblocks: false, @@ -1132,6 +1099,8 @@ impl Config { candidate_retry_cache_size: miner_config.candidate_retry_cache_size, txs_to_consider: miner_config.txs_to_consider, filter_origins: miner_config.filter_origins, + tenure_cost_limit_per_block_percentage: miner_config + .tenure_cost_limit_per_block_percentage, }, miner_status, confirm_microblocks: true, @@ -1155,12 +1124,11 @@ impl Config { /// part dependent on the state machine getting block data back to the miner quickly, and thus /// the poll time is dependent on the first attempt time. pub fn get_poll_time(&self) -> u64 { - let poll_timeout = if self.node.miner { + if self.node.miner { cmp::min(1000, self.miner.first_attempt_time_ms / 2) } else { 1000 - }; - poll_timeout + } } } @@ -1217,7 +1185,7 @@ pub struct BurnchainConfig { pub first_burn_block_hash: Option, /// Custom override for the definitions of the epochs. This will only be applied for testnet and /// regtest nodes. - pub epochs: Option<Vec<StacksEpoch>>, + pub epochs: Option<EpochList>, pub pox_2_activation: Option, pub pox_reward_length: Option, pub pox_prepare_length: Option, @@ -1251,7 +1219,7 @@ impl BurnchainConfig { username: None, password: None, timeout: 60, - magic_bytes: BLOCKSTACK_MAGIC_MAINNET.clone(), + magic_bytes: BLOCKSTACK_MAGIC_MAINNET, local_mining_public_key: None, process_exit_at_block_height: None, poll_time_secs: 10, // TODO: this is a testnet specific value. 
@@ -1282,22 +1250,18 @@ impl BurnchainConfig { false => "http://", }; let wallet_path = if let Some(wallet_id) = wallet.as_ref() { - format!("/wallet/{}", wallet_id) + format!("/wallet/{wallet_id}") } else { "".to_string() }; - format!( - "{}{}:{}{}", - scheme, self.peer_host, self.rpc_port, wallet_path - ) + format!("{scheme}{}:{}{wallet_path}", self.peer_host, self.rpc_port) } pub fn get_rpc_socket_addr(&self) -> SocketAddr { let mut addrs_iter = format!("{}:{}", self.peer_host, self.rpc_port) .to_socket_addrs() .unwrap(); - let sock_addr = addrs_iter.next().unwrap(); - sock_addr + addrs_iter.next().unwrap() } pub fn get_bitcoin_network(&self) -> (String, BitcoinNetworkType) { @@ -1318,15 +1282,15 @@ pub struct StacksEpochConfigFile { start_height: i64, } -pub const EPOCH_CONFIG_1_0_0: &'static str = "1.0"; -pub const EPOCH_CONFIG_2_0_0: &'static str = "2.0"; -pub const EPOCH_CONFIG_2_0_5: &'static str = "2.05"; -pub const EPOCH_CONFIG_2_1_0: &'static str = "2.1"; -pub const EPOCH_CONFIG_2_2_0: &'static str = "2.2"; -pub const EPOCH_CONFIG_2_3_0: &'static str = "2.3"; -pub const EPOCH_CONFIG_2_4_0: &'static str = "2.4"; -pub const EPOCH_CONFIG_2_5_0: &'static str = "2.5"; -pub const EPOCH_CONFIG_3_0_0: &'static str = "3.0"; +pub const EPOCH_CONFIG_1_0_0: &str = "1.0"; +pub const EPOCH_CONFIG_2_0_0: &str = "2.0"; +pub const EPOCH_CONFIG_2_0_5: &str = "2.05"; +pub const EPOCH_CONFIG_2_1_0: &str = "2.1"; +pub const EPOCH_CONFIG_2_2_0: &str = "2.2"; +pub const EPOCH_CONFIG_2_3_0: &str = "2.3"; +pub const EPOCH_CONFIG_2_4_0: &str = "2.4"; +pub const EPOCH_CONFIG_2_5_0: &str = "2.5"; +pub const EPOCH_CONFIG_3_0_0: &str = "3.0"; #[derive(Clone, Deserialize, Default, Debug)] pub struct AffirmationOverride { @@ -1521,15 +1485,14 @@ impl BurnchainConfigFile { // Using std::net::LookupHost would be preferable, but it's // unfortunately unstable at this point. 
// https://doc.rust-lang.org/1.6.0/std/net/struct.LookupHost.html - let mut sock_addrs = format!("{}:1", &peer_host) + let mut sock_addrs = format!("{peer_host}:1") .to_socket_addrs() - .map_err(|e| format!("Invalid burnchain.peer_host: {}", &e))?; + .map_err(|e| format!("Invalid burnchain.peer_host: {e}"))?; let sock_addr = match sock_addrs.next() { Some(addr) => addr, None => { return Err(format!( - "No IP address could be queried for '{}'", - &peer_host + "No IP address could be queried for '{peer_host}'" )); } }; @@ -1726,10 +1689,7 @@ impl CostEstimatorName { if &s.to_lowercase() == "naive_pessimistic" { CostEstimatorName::NaivePessimistic } else { - panic!( - "Bad cost estimator name supplied in configuration file: {}", - s - ); + panic!("Bad cost estimator name supplied in configuration file: {s}"); } } } @@ -1741,10 +1701,7 @@ impl FeeEstimatorName { } else if &s.to_lowercase() == "fuzzed_weighted_median_fee_rate" { FeeEstimatorName::FuzzedWeightedMedianFeeRate } else { - panic!( - "Bad fee estimator name supplied in configuration file: {}", - s - ); + panic!("Bad fee estimator name supplied in configuration file: {s}"); } } } @@ -1754,7 +1711,7 @@ impl CostMetricName { if &s.to_lowercase() == "proportion_dot_product" { CostMetricName::ProportionDotProduct } else { - panic!("Bad cost metric name supplied in configuration file: {}", s); + panic!("Bad cost metric name supplied in configuration file: {s}"); } } } @@ -1924,7 +1881,7 @@ impl Default for NodeConfig { rng.fill_bytes(&mut buf); let now = get_epoch_time_ms(); - let testnet_id = format!("stacks-node-{}", now); + let testnet_id = format!("stacks-node-{now}"); let rpc_port = 20443; let p2p_port = 20444; @@ -1939,11 +1896,11 @@ impl Default for NodeConfig { NodeConfig { name: name.to_string(), seed: seed.to_vec(), - working_dir: format!("/tmp/{}", testnet_id), - rpc_bind: format!("0.0.0.0:{}", rpc_port), - p2p_bind: format!("0.0.0.0:{}", p2p_port), - data_url: format!("http://127.0.0.1:{}", rpc_port), - p2p_address: format!("127.0.0.1:{}", rpc_port), + working_dir: format!("/tmp/{testnet_id}"), + rpc_bind: format!("0.0.0.0:{rpc_port}"), + p2p_bind: format!("0.0.0.0:{p2p_port}"), + data_url: format!("http://127.0.0.1:{rpc_port}"), + p2p_address: format!("127.0.0.1:{rpc_port}"), bootstrap_node: vec![], deny_nodes: vec![], local_peer_seed: local_peer_seed.to_vec(), @@ -1976,9 +1933,8 @@ impl NodeConfig { /// Get a SocketAddr for this node's RPC endpoint which uses the loopback address pub fn get_rpc_loopback(&self) -> Option { let rpc_port = SocketAddr::from_str(&self.rpc_bind) - .or_else(|e| { + .map_err(|e| { error!("Could not parse node.rpc_bind configuration setting as SocketAddr: {e}"); - Err(()) }) .ok()? 
.port(); @@ -2032,15 +1988,12 @@ impl NodeConfig { pub fn add_bootstrap_node(&mut self, bootstrap_node: &str, chain_id: u32, peer_version: u32) { let parts: Vec<&str> = bootstrap_node.split('@').collect(); if parts.len() != 2 { - panic!( - "Invalid bootstrap node '{}': expected PUBKEY@IP:PORT", - bootstrap_node - ); + panic!("Invalid bootstrap node '{bootstrap_node}': expected PUBKEY@IP:PORT"); } let (pubkey_str, hostport) = (parts[0], parts[1]); let pubkey = Secp256k1PublicKey::from_hex(pubkey_str) .unwrap_or_else(|_| panic!("Invalid public key '{pubkey_str}'")); - debug!("Resolve '{}'", &hostport); + debug!("Resolve '{hostport}'"); let mut attempts = 0; let max_attempts = 5; @@ -2052,22 +2005,16 @@ impl NodeConfig { if let Some(addr) = addrs.next() { break addr; } else { - panic!("No addresses found for '{}'", hostport); + panic!("No addresses found for '{hostport}'"); } } Err(e) => { if attempts >= max_attempts { - panic!( - "Failed to resolve '{}' after {} attempts: {}", - hostport, max_attempts, e - ); + panic!("Failed to resolve '{hostport}' after {max_attempts} attempts: {e}"); } else { error!( - "Attempt {} - Failed to resolve '{}': {}. Retrying in {:?}...", + "Attempt {} - Failed to resolve '{hostport}': {e}. Retrying in {delay:?}...", attempts + 1, - hostport, - e, - delay ); thread::sleep(delay); attempts += 1; @@ -2088,8 +2035,8 @@ impl NodeConfig { peer_version: u32, ) { for part in bootstrap_nodes.split(',') { - if part.len() > 0 { - self.add_bootstrap_node(&part, chain_id, peer_version); + if !part.is_empty() { + self.add_bootstrap_node(part, chain_id, peer_version); } } } @@ -2107,8 +2054,8 @@ impl NodeConfig { pub fn set_deny_nodes(&mut self, deny_nodes: String, chain_id: u32, peer_version: u32) { for part in deny_nodes.split(',') { - if part.len() > 0 { - self.add_deny_node(&part, chain_id, peer_version); + if !part.is_empty() { + self.add_deny_node(part, chain_id, peer_version); } } } @@ -2122,10 +2069,7 @@ impl NodeConfig { MARFOpenOpts::new( hash_mode, - &self - .marf_cache_strategy - .as_ref() - .unwrap_or(&"noop".to_string()), + self.marf_cache_strategy.as_deref().unwrap_or("noop"), false, ) } @@ -2183,6 +2127,14 @@ pub struct MinerConfig { /// The minimum time to wait between mining blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined /// within the same second as its parent, it will be rejected by the signers. pub min_time_between_blocks_ms: u64, + /// Time in milliseconds to pause after receiving the first threshold rejection, before proposing a new block. + pub first_rejection_pause_ms: u64, + /// Time in milliseconds to pause after receiving subsequent threshold rejections, before proposing a new block. + pub subsequent_rejection_pause_ms: u64, + /// Duration to wait for a Nakamoto block after seeing a burnchain block before submitting a block commit. + pub block_commit_delay: Duration, + /// The percentage of the remaining tenure cost limit to consume each block. 
+ pub tenure_cost_limit_per_block_percentage: Option<u8>, } impl Default for MinerConfig { @@ -2213,6 +2165,12 @@ impl Default for MinerConfig { max_reorg_depth: 3, pre_nakamoto_mock_signing: false, // Should only default true if mining key is set min_time_between_blocks_ms: DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS, + first_rejection_pause_ms: DEFAULT_FIRST_REJECTION_PAUSE_MS, + subsequent_rejection_pause_ms: DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS, + block_commit_delay: Duration::from_millis(DEFAULT_BLOCK_COMMIT_DELAY_MS), + tenure_cost_limit_per_block_percentage: Some( + DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE, + ), } } } @@ -2264,6 +2222,7 @@ pub struct ConnectionOptionsFile { pub private_neighbors: Option<bool>, pub auth_token: Option<String>, pub antientropy_retry: Option<u64>, + pub reject_blocks_pushed: Option<bool>, } impl ConnectionOptionsFile { @@ -2274,27 +2233,27 @@ impl ConnectionOptionsFile { public_ip_address .parse::<SocketAddr>() .map(|addr| (PeerAddress::from_socketaddr(&addr), addr.port())) - .map_err(|e| format!("Invalid connection_option.public_ip_address: {}", e)) + .map_err(|e| format!("Invalid connection_option.public_ip_address: {e}")) }) .transpose()?; let mut read_only_call_limit = HELIUM_DEFAULT_CONNECTION_OPTIONS .read_only_call_limit .clone(); - self.read_only_call_limit_write_length.map(|x| { + if let Some(x) = self.read_only_call_limit_write_length { read_only_call_limit.write_length = x; - }); - self.read_only_call_limit_write_count.map(|x| { + } + if let Some(x) = self.read_only_call_limit_write_count { read_only_call_limit.write_count = x; - }); - self.read_only_call_limit_read_length.map(|x| { + } + if let Some(x) = self.read_only_call_limit_read_length { read_only_call_limit.read_length = x; - }); - self.read_only_call_limit_read_count.map(|x| { + } + if let Some(x) = self.read_only_call_limit_read_count { read_only_call_limit.read_count = x; - }); - self.read_only_call_limit_runtime.map(|x| { + } + if let Some(x) = self.read_only_call_limit_runtime { read_only_call_limit.runtime = x; - }); + }; let default = ConnectionOptions::default(); Ok(ConnectionOptions { read_only_call_limit, @@ -2345,7 +2304,7 @@ impl ConnectionOptionsFile { .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.soft_max_clients_per_host), walk_interval: self .walk_interval - .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_interval.clone()), + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_interval), walk_seed_probability: self .walk_seed_probability .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.walk_seed_probability), @@ -2367,7 +2326,7 @@ impl ConnectionOptionsFile { .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.maximum_call_argument_size), download_interval: self .download_interval - .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.download_interval.clone()), + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.download_interval), inv_sync_interval: self .inv_sync_interval .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.inv_sync_interval), @@ -2388,7 +2347,7 @@ impl ConnectionOptionsFile { force_disconnect_interval: self.force_disconnect_interval, max_http_clients: self .max_http_clients - .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.max_http_clients.clone()), + .unwrap_or_else(|| HELIUM_DEFAULT_CONNECTION_OPTIONS.max_http_clients), connect_timeout: self.connect_timeout.unwrap_or(10), handshake_timeout: self.handshake_timeout.unwrap_or(5), max_sockets: self.max_sockets.unwrap_or(800) as usize, @@ -2396,6 +2355,9 @@ impl ConnectionOptionsFile { private_neighbors:
self.private_neighbors.unwrap_or(true), auth_token: self.auth_token, antientropy_retry: self.antientropy_retry.unwrap_or(default.antientropy_retry), + reject_blocks_pushed: self + .reject_blocks_pushed + .unwrap_or(default.reject_blocks_pushed), ..default }) } @@ -2449,7 +2411,7 @@ impl NodeConfigFile { name: self.name.unwrap_or(default_node_config.name), seed: match self.seed { Some(seed) => hex_bytes(&seed) - .map_err(|_e| format!("node.seed should be a hex encoded string"))?, + .map_err(|_e| "node.seed should be a hex encoded string".to_string())?, None => default_node_config.seed, }, working_dir: std::env::var("STACKS_WORKING_DIR") @@ -2463,8 +2425,9 @@ impl NodeConfigFile { .data_url .unwrap_or_else(|| format!("http://{rpc_bind}")), local_peer_seed: match self.local_peer_seed { - Some(seed) => hex_bytes(&seed) - .map_err(|_e| format!("node.local_peer_seed should be a hex encoded string"))?, + Some(seed) => hex_bytes(&seed).map_err(|_e| { + "node.local_peer_seed should be a hex encoded string".to_string() + })?, None => default_node_config.local_peer_seed, }, miner, @@ -2519,7 +2482,7 @@ impl NodeConfigFile { .unwrap_or(default_node_config.chain_liveness_poll_time_secs), stacker_dbs: self .stacker_dbs - .unwrap_or(vec![]) + .unwrap_or_default() .iter() .filter_map(|contract_id| QualifiedContractIdentifier::parse(contract_id).ok()) .collect(), @@ -2575,6 +2538,10 @@ pub struct MinerConfigFile { pub max_reorg_depth: Option<u64>, pub pre_nakamoto_mock_signing: Option<bool>, pub min_time_between_blocks_ms: Option<u64>, + pub first_rejection_pause_ms: Option<u64>, + pub subsequent_rejection_pause_ms: Option<u64>, + pub block_commit_delay_ms: Option<u64>, + pub tenure_cost_limit_per_block_percentage: Option<u8>, } impl MinerConfigFile { @@ -2585,6 +2552,22 @@ impl MinerConfigFile { .map(|x| Secp256k1PrivateKey::from_hex(x)) .transpose()?; let pre_nakamoto_mock_signing = mining_key.is_some(); + + let tenure_cost_limit_per_block_percentage = + if let Some(percentage) = self.tenure_cost_limit_per_block_percentage { + if percentage == 100 { + None + } else if percentage > 0 && percentage < 100 { + Some(percentage) + } else { + return Err( + "miner.tenure_cost_limit_per_block_percentage must be between 1 and 100" + .to_string(), + ); + } + } else { + miner_default_config.tenure_cost_limit_per_block_percentage + }; Ok(MinerConfig { first_attempt_time_ms: self .first_attempt_time_ms @@ -2652,7 +2635,7 @@ impl MinerConfigFile { |txs_to_consider_str| match str::parse(txs_to_consider_str) { Ok(txtype) => txtype, Err(e) => { - panic!("could not parse '{}': {}", &txs_to_consider_str, &e); + panic!("could not parse '{txs_to_consider_str}': {e}"); } }, ) @@ -2668,7 +2651,7 @@ impl MinerConfigFile { .map(|origin_str| match StacksAddress::from_string(origin_str) { Some(addr) => addr, None => { - panic!("could not parse '{}' into a Stacks address", origin_str); + panic!("could not parse '{origin_str}' into a Stacks address"); } }) .collect() @@ -2688,6 +2671,10 @@ impl MinerConfigFile { } else { ms }).unwrap_or(miner_default_config.min_time_between_blocks_ms), + first_rejection_pause_ms: self.first_rejection_pause_ms.unwrap_or(miner_default_config.first_rejection_pause_ms), + subsequent_rejection_pause_ms: self.subsequent_rejection_pause_ms.unwrap_or(miner_default_config.subsequent_rejection_pause_ms), + block_commit_delay: self.block_commit_delay_ms.map(Duration::from_millis).unwrap_or(miner_default_config.block_commit_delay), + tenure_cost_limit_per_block_percentage, }) } } @@ -2702,6 +2689,7 @@ pub struct AtlasConfigFile { impl
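The `tenure_cost_limit_per_block_percentage` handling above folds three cases into one `Option`: `100` means "no per-block limit" (`None`), `1..=99` is kept as the cap, and anything else is a configuration error. A standalone sketch of that mapping, under the assumption that the field is a small unsigned integer (the helper name is hypothetical):

```rust
/// Hypothetical helper mirroring the validation in `MinerConfigFile::into_config`:
/// 100 disables the per-block cap, 1..=99 enforces it, everything else is rejected.
fn parse_tenure_cost_limit(percentage: u8) -> Result<Option<u8>, String> {
    if percentage == 100 {
        Ok(None) // consume the full remaining tenure budget each block
    } else if (1..100).contains(&percentage) {
        Ok(Some(percentage))
    } else {
        Err("miner.tenure_cost_limit_per_block_percentage must be between 1 and 100".to_string())
    }
}

fn main() {
    assert_eq!(parse_tenure_cost_limit(100), Ok(None));
    assert_eq!(parse_tenure_cost_limit(25), Ok(Some(25)));
    assert!(parse_tenure_cost_limit(0).is_err());
}
```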
AtlasConfigFile { // Can't implement `Into` trait because this takes a parameter + #[allow(clippy::wrong_self_convention)] fn into_config(&self, mainnet: bool) -> AtlasConfig { let mut conf = AtlasConfig::new(mainnet); if let Some(val) = self.attachments_max_size { @@ -3003,7 +2991,7 @@ mod tests { "#, ) .unwrap_err(); - println!("{}", err); + println!("{err}"); assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); } @@ -3024,7 +3012,7 @@ mod tests { fn test_example_confs() { // For each config file in the ../conf/ directory, we should be able to parse it let conf_dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("conf"); - println!("Reading config files from: {:?}", conf_dir); + println!("Reading config files from: {conf_dir:?}"); let conf_files = fs::read_dir(conf_dir).unwrap(); for entry in conf_files { diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index bb05cd6128..8144cd8ec5 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -18,7 +18,7 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::path::PathBuf; use std::sync::mpsc::{channel, Receiver, Sender}; -use std::sync::Mutex; +use std::sync::{Arc, Mutex}; use std::thread::sleep; use std::time::Duration; @@ -107,17 +107,8 @@ pub const PATH_BLOCK_PROCESSED: &str = "new_block"; pub const PATH_ATTACHMENT_PROCESSED: &str = "attachments/new"; pub const PATH_PROPOSAL_RESPONSE: &str = "proposal_response"; -pub static STACKER_DB_CHANNEL: StackerDBChannel = StackerDBChannel::new(); - /// This struct receives StackerDB event callbacks without registering -/// over the JSON/RPC interface. To ensure that any event observer -/// uses the same channel, we use a lazy_static global for the channel (this -/// implements a singleton using STACKER_DB_CHANNEL). -/// -/// This is in place because a Nakamoto miner needs to receive -/// StackerDB events. It could either poll the database (seems like a -/// bad idea) or listen for events. Registering for RPC callbacks -/// seems bad. So instead, it uses a singleton sync channel. +/// This struct receives StackerDB event callbacks without registering +/// over the JSON/RPC interface.
pub struct StackerDBChannel { sender_info: Mutex<Option<InnerStackerDBChannel>>, } @@ -181,6 +172,12 @@ impl InnerStackerDBChannel { } } +impl Default for StackerDBChannel { + fn default() -> Self { + Self::new() + } +} + impl StackerDBChannel { pub const fn new() -> Self { Self { @@ -256,7 +253,7 @@ where serializer.serialize_str(&value.to_string()) } -fn serialize_pox_addresses<S>(value: &Vec<PoxAddress>, serializer: S) -> Result<S::Ok, S::Error> +fn serialize_pox_addresses<S>(value: &[PoxAddress], serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { @@ -374,7 +371,7 @@ impl EventObserver { } Err(err) => { // Log the error, then retry after a delay - warn!("Failed to insert payload into event observer database: {:?}", err; + warn!("Failed to insert payload into event observer database: {err:?}"; "backoff" => ?backoff, "attempts" => attempts ); @@ -402,8 +399,8 @@ impl EventObserver { let id: i64 = row.get(0)?; let url: String = row.get(1)?; let payload_text: String = row.get(2)?; - let payload: serde_json::Value = serde_json::from_str(&payload_text) - .map_err(|e| db_error::SerializationError(e))?; + let payload: serde_json::Value = + serde_json::from_str(&payload_text).map_err(db_error::SerializationError)?; let timeout_ms: u64 = row.get(3)?; Ok((id, url, payload, timeout_ms)) }, @@ -457,7 +454,7 @@ impl EventObserver { ); let url = Url::parse(full_url) - .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {} as a URL", full_url)); + .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {full_url} as a URL")); let host = url.host_str().expect("Invalid URL: missing host"); let port = url.port_or_known_default().unwrap_or(80); @@ -494,8 +491,7 @@ impl EventObserver { } Err(err) => { warn!( - "Event dispatcher: connection or request failed to {}:{} - {:?}", - &host, &port, err; + "Event dispatcher: connection or request failed to {host}:{port} - {err:?}"; "backoff" => ?backoff, "attempts" => attempts ); @@ -549,11 +545,11 @@ impl EventObserver { pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { // Construct the full URL let url_str = if path.starts_with('/') { - format!("{}{}", &self.endpoint, path) + format!("{}{path}", &self.endpoint) } else { - format!("{}/{}", &self.endpoint, path) + format!("{}/{path}", &self.endpoint) }; - let full_url = format!("http://{}", url_str); + let full_url = format!("http://{url_str}"); if let Some(db_path) = &self.db_path { let conn = @@ -604,7 +600,7 @@ impl EventObserver { .collect(); json!({ - "burn_block_hash": format!("0x{}", burn_block), + "burn_block_hash": format!("0x{burn_block}"), "burn_block_height": burn_block_height, "reward_recipients": serde_json::Value::Array(reward_recipients), "reward_slot_holders": serde_json::Value::Array(reward_slot_holders), @@ -642,7 +638,7 @@ impl EventObserver { TransactionOrigin::Burn(op) => ( op.txid().to_string(), "00".to_string(), - BlockstackOperationType::blockstack_op_to_json(&op), + BlockstackOperationType::blockstack_op_to_json(op), ), TransactionOrigin::Stacks(ref tx) => { let txid = tx.txid().to_string(); @@ -741,10 +737,10 @@ impl EventObserver { .collect(); let payload = json!({ - "parent_index_block_hash": format!("0x{}", parent_index_block_hash), + "parent_index_block_hash": format!("0x{parent_index_block_hash}"), "events": serialized_events, "transactions": serialized_txs, - "burn_block_hash": format!("0x{}", burn_block_hash), + "burn_block_hash": format!("0x{burn_block_hash}"), "burn_block_height": burn_block_height, "burn_block_timestamp": burn_block_timestamp, }); @@ -776,6 +772,7 @@ impl EventObserver {
self.send_payload(payload, PATH_BURN_BLOCK_SUBMIT); } + #[allow(clippy::too_many_arguments)] fn make_new_block_processed_payload( &self, filtered_events: Vec<(usize, &(bool, Txid, &StacksTransactionEvent))>, @@ -806,12 +803,15 @@ }) .collect(); - let mut tx_index: u32 = 0; let mut serialized_txs = vec![]; - for receipt in receipts.iter() { - let payload = EventObserver::make_new_block_txs_payload(receipt, tx_index); + for (tx_index, receipt) in receipts.iter().enumerate() { + let payload = EventObserver::make_new_block_txs_payload( + receipt, + tx_index + .try_into() + .expect("BUG: more receipts than U32::MAX"), + ); serialized_txs.push(payload); - tx_index += 1; } let signer_bitvec_value = signer_bitvec_opt @@ -821,7 +821,7 @@ let (reward_set_value, cycle_number_value) = match &reward_set_data { Some(data) => ( - serde_json::to_value(&RewardSetEventPayload::from_reward_set(&data.reward_set)) + serde_json::to_value(RewardSetEventPayload::from_reward_set(&data.reward_set)) .unwrap_or_default(), serde_json::to_value(data.cycle_number).unwrap_or_default(), ), @@ -835,17 +835,17 @@ "block_time": block_timestamp, "burn_block_hash": format!("0x{}", metadata.burn_header_hash), "burn_block_height": metadata.burn_header_height, - "miner_txid": format!("0x{}", winner_txid), + "miner_txid": format!("0x{winner_txid}"), "burn_block_time": metadata.burn_header_timestamp, "index_block_hash": format!("0x{}", metadata.index_block_hash()), "parent_block_hash": format!("0x{}", block.parent_block_hash), - "parent_index_block_hash": format!("0x{}", parent_index_hash), + "parent_index_block_hash": format!("0x{parent_index_hash}"), "parent_microblock": format!("0x{}", block.parent_microblock_hash), "parent_microblock_sequence": block.parent_microblock_sequence, "matured_miner_rewards": mature_rewards.clone(), "events": serialized_events, "transactions": serialized_txs, - "parent_burn_block_hash": format!("0x{}", parent_burn_block_hash), + "parent_burn_block_hash": format!("0x{parent_burn_block_hash}"), "parent_burn_block_height": parent_burn_block_height, "parent_burn_block_timestamp": parent_burn_block_timestamp, "anchored_cost": anchored_consumed, @@ -914,6 +914,8 @@ pub struct EventDispatcher { /// Index into `registered_observers` that will receive block proposal events (Nakamoto and /// later) block_proposal_observers_lookup: HashSet<u16>, + /// Channel for sending StackerDB events to the miner coordinator + pub stackerdb_channel: Arc<Mutex<StackerDBChannel>>, } /// This struct is used specifically for receiving proposal responses.
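With the global `STACKER_DB_CHANNEL` singleton removed, the dispatcher now owns the channel and hands out clones of the `Arc`, which is what `BlockMinerThread` does later in this diff when spawning its signing coordinator. A minimal sketch of the ownership pattern (simplified stand-in types, not the real ones):

```rust
use std::sync::{Arc, Mutex};

// Simplified stand-ins for StackerDBChannel and EventDispatcher.
struct Channel { active: Option<String> }
struct Dispatcher { channel: Arc<Mutex<Channel>> }

fn main() {
    let dispatcher = Dispatcher {
        channel: Arc::new(Mutex::new(Channel { active: None })),
    };

    // A miner thread gets its own handle instead of reaching for a global.
    let miner_handle = Arc::clone(&dispatcher.channel);
    miner_handle.lock().expect("poisoned").active = Some("signers".into());

    // The dispatcher sees the update through the shared state.
    assert!(dispatcher.channel.lock().expect("poisoned").active.is_some());
}
```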
@@ -1097,9 +1099,16 @@ impl BlockEventDispatcher for EventDispatcher { } } +impl Default for EventDispatcher { + fn default() -> Self { + EventDispatcher::new() + } +} + impl EventDispatcher { pub fn new() -> EventDispatcher { EventDispatcher { + stackerdb_channel: Arc::new(Mutex::new(StackerDBChannel::new())), registered_observers: vec![], contract_events_observers_lookup: HashMap::new(), assets_observers_lookup: HashMap::new(), @@ -1125,7 +1134,7 @@ impl EventDispatcher { ) { // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.burn_block_observers_lookup, true); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1149,6 +1158,7 @@ impl EventDispatcher { /// - dispatch_matrix: a vector where each index corresponds to the hashset of event indexes /// that each respective event observer is subscribed to /// - events: a vector of all events from all the tx receipts + #[allow(clippy::type_complexity)] fn create_dispatch_matrix_and_event_vector<'a>( &self, receipts: &'a Vec<StacksTransactionReceipt>, @@ -1241,6 +1251,7 @@ (dispatch_matrix, events) } + #[allow(clippy::too_many_arguments)] pub fn process_chain_tip( &self, block: &StacksBlockEventData, @@ -1264,7 +1275,7 @@ let all_receipts = receipts.to_owned(); let (dispatch_matrix, events) = self.create_dispatch_matrix_and_event_vector(&all_receipts); - if dispatch_matrix.len() > 0 { + if !dispatch_matrix.is_empty() { let mature_rewards_vec = if let Some(rewards_info) = mature_rewards_info { mature_rewards .iter() @@ -1297,7 +1308,7 @@ let payload = self.registered_observers[observer_id] .make_new_block_processed_payload( filtered_events, - &block, + block, metadata, receipts, parent_index_hash, @@ -1342,7 +1353,7 @@ ) }) .collect(); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } let flattened_receipts = processed_unconfirmed_state @@ -1390,12 +1401,12 @@ .enumerate() .filter_map(|(obs_id, observer)| { let lookup_ix = u16::try_from(obs_id).expect("FATAL: more than 2^16 observers"); - if lookup.contains(&lookup_ix) { - return Some(observer); - } else if include_any && self.any_event_observers_lookup.contains(&lookup_ix) { - return Some(observer); + if lookup.contains(&lookup_ix) + || (include_any && self.any_event_observers_lookup.contains(&lookup_ix)) + { + Some(observer) } else { - return None; + None } }) .collect() @@ -1405,7 +1416,7 @@ // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.mempool_observers_lookup, true); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1427,7 +1438,7 @@ ) { let interested_observers = self.filter_observers(&self.miner_observers_lookup, false); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1456,7 +1467,7 @@ ) { let interested_observers = self.filter_observers(&self.mined_microblocks_observers_lookup, false); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1483,7 +1494,7 @@ tx_events: Vec<TransactionEvent>, ) { let interested_observers = self.filter_observers(&self.miner_observers_lookup, false); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1502,7 +1513,7 @@
block_size: block_size_bytes, cost: consumed.clone(), tx_events, - miner_signature: block.header.miner_signature.clone(), + miner_signature: block.header.miner_signature, signer_signature_hash: block.header.signer_signature_hash(), signer_signature: block.header.signer_signature.clone(), signer_bitvec, @@ -1522,13 +1533,16 @@ modified_slots: Vec<StackerDBChunkData>, ) { debug!( - "event_dispatcher: New StackerDB chunk events for {}: {:?}", - contract_id, modified_slots + "event_dispatcher: New StackerDB chunk events for {contract_id}: {modified_slots:?}" ); let interested_observers = self.filter_observers(&self.stackerdb_observers_lookup, false); - let interested_receiver = STACKER_DB_CHANNEL.is_active(&contract_id); + let stackerdb_channel = self + .stackerdb_channel + .lock() + .expect("FATAL: failed to lock StackerDB channel mutex"); + let interested_receiver = stackerdb_channel.is_active(&contract_id); if interested_observers.is_empty() && interested_receiver.is_none() { return; } @@ -1558,13 +1572,13 @@ // lazily assemble payload only if we have observers let interested_observers = self.filter_observers(&self.mempool_observers_lookup, true); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } let dropped_txids: Vec<_> = txs .into_iter() - .map(|tx| serde_json::Value::String(format!("0x{}", &tx))) + .map(|tx| serde_json::Value::String(format!("0x{tx}"))) .collect(); let payload = json!({ @@ -1577,9 +1591,9 @@ } } - pub fn process_new_attachments(&self, attachments: &Vec<(AttachmentInstance, Attachment)>) { + pub fn process_new_attachments(&self, attachments: &[(AttachmentInstance, Attachment)]) { let interested_observers: Vec<_> = self.registered_observers.iter().enumerate().collect(); - if interested_observers.len() < 1 { + if interested_observers.is_empty() { return; } @@ -1598,7 +1612,7 @@ &self, asset_identifier: &AssetIdentifier, event_index: usize, - dispatch_matrix: &mut Vec<HashSet<usize>>, + dispatch_matrix: &mut [HashSet<usize>], ) { if let Some(observer_indexes) = self.assets_observers_lookup.get(asset_identifier) { for o_i in observer_indexes { @@ -1857,8 +1871,7 @@ mod test { // Assert that the connection attempt timed out assert!( result.is_err(), - "Expected a timeout error, but got {:?}", - result + "Expected a timeout error, but got {result:?}" ); assert_eq!( result.unwrap_err().kind(), @@ -2116,7 +2129,7 @@ let (tx, rx) = channel(); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let request = server.recv().unwrap(); assert_eq!(request.url(), "/test"); @@ -2131,7 +2144,7 @@ }); let observer = - EventObserver::new(None, format!("127.0.0.1:{}", port), Duration::from_secs(3)); + EventObserver::new(None, format!("127.0.0.1:{port}"), Duration::from_secs(3)); let payload = json!({"key": "value"}); @@ -2150,7 +2163,7 @@ let (tx, rx) = channel(); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; while let Ok(request) = server.recv() { @@ -2180,7 +2193,7 @@ }); let observer = - EventObserver::new(None, format!("127.0.0.1:{}", port), Duration::from_secs(3)); + EventObserver::new(None, format!("127.0.0.1:{port}"),
Duration::from_secs(3)); let payload = json!({"key": "value"}); @@ -2200,7 +2213,7 @@ mod test { let (tx, rx) = channel(); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; let mut _request_holder = None; @@ -2224,7 +2237,7 @@ mod test { } }); - let observer = EventObserver::new(None, format!("127.0.0.1:{}", port), timeout); + let observer = EventObserver::new(None, format!("127.0.0.1:{port}"), timeout); let payload = json!({"key": "value"}); @@ -2237,7 +2250,7 @@ mod test { // Record the time after the function returns let elapsed_time = start_time.elapsed(); - println!("Elapsed time: {:?}", elapsed_time); + println!("Elapsed time: {elapsed_time:?}"); assert!( elapsed_time >= timeout, "Expected a timeout, but the function returned too quickly" @@ -2263,9 +2276,9 @@ mod test { // Set up a channel to notify when the server has processed the request let (tx, rx) = channel(); - info!("Starting mock server on port {}", port); + info!("Starting mock server on port {port}"); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; let mut _request_holder = None; @@ -2316,7 +2329,7 @@ mod test { let observer = EventObserver::new( Some(working_dir.clone()), - format!("127.0.0.1:{}", port), + format!("127.0.0.1:{port}"), timeout, ); diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index b1ddf2e82b..c285c6a168 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -23,6 +23,7 @@ use crate::TipCandidate; pub type NeonGlobals = Globals; /// Command types for the relayer thread, issued to it by other threads +#[allow(clippy::large_enum_variant)] pub enum RelayerDirective { /// Handle some new data that arrived on the network (such as blocks, transactions, and HandleNetResult(NetworkResult), @@ -99,6 +100,7 @@ impl Clone for Globals { } impl Globals { + #[allow(clippy::too_many_arguments)] pub fn new( coord_comms: CoordinatorChannels, miner_status: Arc>, @@ -282,15 +284,14 @@ impl Globals { **leader_key_registration_state { info!( - "Received burnchain block #{} including key_register_op - {}", - burn_block_height, txid + "Received burnchain block #{burn_block_height} including key_register_op - {txid}" ); if txid == op.txid { let active_key = RegisteredKey { target_block_height, vrf_public_key: op.public_key, - block_height: op.block_height as u64, - op_vtxindex: op.vtxindex as u32, + block_height: op.block_height, + op_vtxindex: op.vtxindex, memo: op.memo, }; @@ -300,8 +301,8 @@ impl Globals { activated_key = Some(active_key); } else { debug!( - "key_register_op {} does not match our pending op {}", - txid, &op.txid + "key_register_op {txid} does not match our pending op {}", + &op.txid ); } } @@ -450,10 +451,7 @@ impl Globals { /// Clear the initiative flag and return its value pub fn take_initiative(&self) -> Option { match self.initiative.lock() { - Ok(mut initiative) => { - let ret = (*initiative).take(); - ret - } + Ok(mut initiative) => (*initiative).take(), Err(_e) => { error!("FATAL: failed to lock initiative"); panic!(); diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index b6df8549c4..4e85750880 100644 --- a/testnet/stacks-node/src/keychain.rs 
+++ b/testnet/stacks-node/src/keychain.rs @@ -123,10 +123,7 @@ impl Keychain { let proof = VRF::prove(&sk, bytes.as_ref()); // Ensure that the proof is valid by verifying - let is_valid = match VRF::verify(&pk, &proof, bytes.as_ref()) { - Ok(v) => v, - Err(_) => false, - }; + let is_valid = VRF::verify(&pk, &proof, bytes.as_ref()).unwrap_or(false); assert!(is_valid); proof } @@ -178,7 +175,7 @@ impl Keychain { } /// Sign a transaction as if we were the origin - pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) -> () { + pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) { let sk = self.get_secret_key(); tx_signer .sign_origin(&sk) @@ -333,7 +330,7 @@ mod tests { } }; sk.set_compress_public(true); - self.microblocks_secret_keys.push(sk.clone()); + self.microblocks_secret_keys.push(sk); debug!("Microblock keypair rotated"; "burn_block_height" => %burn_block_height, @@ -346,7 +343,7 @@ mod tests { self.microblocks_secret_keys.last().cloned() } - pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) -> () { + pub fn sign_as_origin(&self, tx_signer: &mut StacksTransactionSigner) { let num_keys = if self.secret_keys.len() < self.threshold as usize { self.secret_keys.len() } else { @@ -364,18 +361,15 @@ mod tests { let vrf_sk = match self.vrf_map.get(vrf_pk) { Some(vrf_pk) => vrf_pk, None => { - warn!("No VRF secret key on file for {:?}", vrf_pk); + warn!("No VRF secret key on file for {vrf_pk:?}"); return None; } }; // Generate the proof - let proof = VRF::prove(&vrf_sk, bytes.as_ref()); + let proof = VRF::prove(vrf_sk, bytes.as_ref()); // Ensure that the proof is valid by verifying - let is_valid = match VRF::verify(vrf_pk, &proof, bytes.as_ref()) { - Ok(v) => v, - Err(_) => false, - }; + let is_valid = VRF::verify(vrf_pk, &proof, bytes.as_ref()).unwrap_or(false); assert!(is_valid); Some(proof) } @@ -385,7 +379,7 @@ mod tests { let public_keys = self .secret_keys .iter() - .map(|ref pk| StacksPublicKey::from_private(pk)) + .map(StacksPublicKey::from_private) .collect(); let version = if is_mainnet { self.hash_mode.to_version_mainnet() } else { @@ -518,7 +512,7 @@ mod tests { TransactionVersion::Testnet, k1.get_transaction_auth().unwrap(), TransactionPayload::TokenTransfer( - recv_addr.clone().into(), + recv_addr.into(), 123, TokenTransferMemo([0u8; 34]), ), @@ -527,7 +521,7 @@ mod tests { TransactionVersion::Testnet, k2.get_transaction_auth().unwrap(), TransactionPayload::TokenTransfer( - recv_addr.clone().into(), + recv_addr.into(), 123, TokenTransferMemo([0u8; 34]), ), diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index fcdc9f5847..4fa1c5e5a7 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -63,11 +63,11 @@ static GLOBAL: Jemalloc = Jemalloc; /// Implementation of `pick_best_tip` CLI option fn cli_pick_best_tip(config_path: &str, at_stacks_height: Option<u64>) -> TipCandidate { - info!("Loading config at path {}", config_path); + info!("Loading config at path {config_path}"); let config = match ConfigFile::from_path(config_path) { Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } }; @@ -93,21 +93,21 @@ fn cli_pick_best_tip(config_path: &str, at_stacks_height: Option<u64>) -> TipCan at_stacks_height, ); - let best_tip = BlockMinerThread::inner_pick_best_tip(stacks_tips, HashMap::new()).unwrap(); - best_tip + BlockMinerThread::inner_pick_best_tip(stacks_tips,
HashMap::new()).unwrap() } /// Implementation of `get_miner_spend` CLI option +#[allow(clippy::incompatible_msrv)] fn cli_get_miner_spend( config_path: &str, mine_start: Option<u64>, at_burnchain_height: Option<u64>, ) -> u64 { - info!("Loading config at path {}", config_path); - let config = match ConfigFile::from_path(&config_path) { + info!("Loading config at path {config_path}"); + let config = match ConfigFile::from_path(config_path) { Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } }; @@ -155,7 +155,7 @@ fn cli_get_miner_spend( &config, &keychain, &burnchain, - &mut sortdb, + &sortdb, &commit_outs, mine_start.unwrap_or(tip.block_height), at_burnchain_height, @@ -171,7 +171,7 @@ else { return 0.0; }; - if active_miners_and_commits.len() == 0 { + if active_miners_and_commits.is_empty() { warn!("No active miners detected; using config file burn_fee_cap"); return 0.0; } @@ -181,7 +181,7 @@ .map(|(miner, _cmt)| miner.as_str()) .collect(); - info!("Active miners: {:?}", &active_miners); + info!("Active miners: {active_miners:?}"); let Ok(unconfirmed_block_commits) = miner_stats .get_unconfirmed_commits(burn_block_height + 1, &active_miners) @@ -195,10 +195,7 @@ .map(|cmt| (format!("{}", &cmt.apparent_sender), cmt.burn_fee)) .collect(); - info!( - "Found unconfirmed block-commits: {:?}", - &unconfirmed_miners_and_amounts - ); + info!("Found unconfirmed block-commits: {unconfirmed_miners_and_amounts:?}"); let (spend_dist, _total_spend) = MinerStats::get_spend_distribution( &active_miners_and_commits, @@ -207,12 +204,11 @@ ); let win_probs = if config.miner.fast_rampup { // look at spends 6+ blocks in the future - let win_probs = MinerStats::get_future_win_distribution( + MinerStats::get_future_win_distribution( &active_miners_and_commits, &unconfirmed_block_commits, &commit_outs, - ); - win_probs + ) } else { // look at the current spends let Ok(unconfirmed_burn_dist) = miner_stats @@ -229,14 +225,13 @@ return 0.0; }; - let win_probs = MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist); - win_probs + MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist) }; - info!("Unconfirmed spend distribution: {:?}", &spend_dist); + info!("Unconfirmed spend distribution: {spend_dist:?}"); info!( - "Unconfirmed win probabilities (fast_rampup={}): {:?}", - config.miner.fast_rampup, &win_probs + "Unconfirmed win probabilities (fast_rampup={}): {win_probs:?}", + config.miner.fast_rampup ); let miner_addrs = BlockMinerThread::get_miner_addrs(&config, &keychain); @@ -247,8 +242,8 @@ .unwrap_or(0.0); info!( - "This miner's win probability at {} is {}", - tip.block_height, &win_prob + "This miner's win probability at {} is {win_prob}", + tip.block_height ); win_prob }, @@ -259,9 +254,9 @@ fn main() { panic::set_hook(Box::new(|panic_info| { - error!("Process abort due to thread panic: {}", panic_info); + error!("Process abort due to thread panic: {panic_info}"); let bt = Backtrace::new(); - error!("Panic backtrace: {:?}", &bt); + error!("Panic backtrace: {bt:?}"); // force a core dump #[cfg(unix)] @@ -289,10 +284,7 @@ fn main() { .expect("Failed to parse --mine-at-height argument"); if let Some(mine_start) = mine_start { - info!( - "Will begin mining once Stacks chain has synced to height >= {}", - mine_start - ); +
info!("Will begin mining once Stacks chain has synced to height >= {mine_start}"); } let config_file = match subcommand.as_str() { @@ -315,14 +307,14 @@ fn main() { "check-config" => { let config_path: String = args.value_from_str("--config").unwrap(); args.finish(); - info!("Loading config at path {}", config_path); + info!("Loading config at path {config_path}"); let config_file = match ConfigFile::from_path(&config_path) { Ok(config_file) => { - debug!("Loaded config file: {:?}", config_file); + debug!("Loaded config file: {config_file:?}"); config_file } Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } }; @@ -332,7 +324,7 @@ fn main() { process::exit(0); } Err(e) => { - warn!("Invalid config: {}", e); + warn!("Invalid config: {e}"); process::exit(1); } }; @@ -340,11 +332,11 @@ fn main() { "start" => { let config_path: String = args.value_from_str("--config").unwrap(); args.finish(); - info!("Loading config at path {}", config_path); + info!("Loading config at path {config_path}"); match ConfigFile::from_path(&config_path) { Ok(config_file) => config_file, Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } } @@ -391,7 +383,7 @@ fn main() { args.finish(); let best_tip = cli_pick_best_tip(&config_path, at_stacks_height); - println!("Best tip is {:?}", &best_tip); + println!("Best tip is {best_tip:?}"); process::exit(0); } "get-spend-amount" => { @@ -401,7 +393,7 @@ fn main() { args.finish(); let spend_amount = cli_get_miner_spend(&config_path, mine_start, at_burnchain_height); - println!("Will spend {}", spend_amount); + println!("Will spend {spend_amount}"); process::exit(0); } _ => { @@ -413,7 +405,7 @@ fn main() { let conf = match Config::from_config_file(config_file, true) { Ok(conf) => conf, Err(e) => { - warn!("Invalid config: {}", e); + warn!("Invalid config: {e}"); process::exit(1); } }; @@ -427,8 +419,7 @@ fn main() { if conf.burnchain.mode == "helium" || conf.burnchain.mode == "mocknet" { let mut run_loop = helium::RunLoop::new(conf); if let Err(e) = run_loop.start(num_round) { - warn!("Helium runloop exited: {}", e); - return; + warn!("Helium runloop exited: {e}"); } } else if conf.burnchain.mode == "neon" || conf.burnchain.mode == "nakamoto-neon" diff --git a/testnet/stacks-node/src/monitoring/prometheus.rs b/testnet/stacks-node/src/monitoring/prometheus.rs index e9705142d0..f91ac53bb4 100644 --- a/testnet/stacks-node/src/monitoring/prometheus.rs +++ b/testnet/stacks-node/src/monitoring/prometheus.rs @@ -20,10 +20,7 @@ pub fn start_serving_prometheus_metrics(bind_address: String) -> Result<(), Moni warn!("Prometheus monitoring: unable to get local bind address, will not spawn prometheus endpoint service."); MonitoringError::UnableToGetAddress })?; - info!( - "Prometheus monitoring: server listening on http://{}", - local_addr - ); + info!("Prometheus monitoring: server listening on http://{local_addr}"); let mut incoming = listener.incoming(); while let Some(stream) = incoming.next().await { diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 7cda49e10d..19af89a3bc 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -28,6 +28,8 @@ use stacks::monitoring::update_active_miners_count_gauge; use stacks::net::atlas::AtlasConfig; use stacks::net::relay::Relayer; use stacks::net::stackerdb::StackerDBs; +use stacks::net::Error as NetError; +use 
stacks::util_lib::db::Error as DBError; use stacks_common::types::chainstate::SortitionId; use stacks_common::types::StacksEpochId; @@ -47,7 +49,7 @@ pub mod sign_coordinator; use self::peer::PeerThread; use self::relayer::{RelayerDirective, RelayerThread}; -pub const RELAYER_MAX_BUFFER: usize = 100; +pub const RELAYER_MAX_BUFFER: usize = 1; const VRF_MOCK_MINER_KEY: u64 = 1; pub const BLOCK_PROCESSOR_STACK_SIZE: usize = 32 * 1024 * 1024; // 32 MB @@ -71,46 +73,71 @@ pub struct StacksNode { } /// Types of errors that can arise during Nakamoto StacksNode operation -#[derive(Debug)] +#[derive(thiserror::Error, Debug)] pub enum Error { /// Can't find the block sortition snapshot for the chain tip + #[error("Can't find the block sortition snapshot for the chain tip")] SnapshotNotFoundForChainTip, /// The burnchain tip changed while this operation was in progress + #[error("The burnchain tip changed while this operation was in progress")] BurnchainTipChanged, /// The Stacks tip changed while this operation was in progress + #[error("The Stacks tip changed while this operation was in progress")] StacksTipChanged, /// Signers rejected a block + #[error("Signers rejected a block")] SignersRejected, /// Error while spawning a subordinate thread + #[error("Error while spawning a subordinate thread: {0}")] SpawnError(std::io::Error), /// Injected testing errors + #[error("Injected testing errors")] FaultInjection, /// This miner was elected, but another sortition occurred before mining started + #[error("This miner was elected, but another sortition occurred before mining started")] MissedMiningOpportunity, /// Attempted to mine while there was no active VRF key + #[error("Attempted to mine while there was no active VRF key")] NoVRFKeyActive, /// The parent block or tenure could not be found + #[error("The parent block or tenure could not be found")] ParentNotFound, /// Something unexpected happened (e.g., hash mismatches) + #[error("Something unexpected happened (e.g., hash mismatches)")] UnexpectedChainState, /// A burnchain operation failed when submitting it to the burnchain + #[error("A burnchain operation failed when submitting it to the burnchain: {0}")] BurnchainSubmissionFailed(BurnchainsError), /// A new parent has been discovered since mining started + #[error("A new parent has been discovered since mining started")] NewParentDiscovered, /// A failure occurred while constructing a VRF Proof + #[error("A failure occurred while constructing a VRF Proof")] BadVrfConstruction, - CannotSelfSign, - MiningFailure(ChainstateError), + #[error("A failure occurred while mining: {0}")] + MiningFailure(#[from] ChainstateError), /// The miner didn't accept their own block + #[error("The miner didn't accept their own block: {0}")] AcceptFailure(ChainstateError), + #[error("A failure occurred while signing a miner's block: {0}")] MinerSignatureError(&'static str), + #[error("A failure occurred while signing a signer's block: {0}")] SignerSignatureError(String), /// A failure occurred while configuring the miner thread + #[error("A failure occurred while configuring the miner thread: {0}")] MinerConfigurationFailed(&'static str), /// An error occurred while operating as the signing coordinator + #[error("An error occurred while operating as the signing coordinator: {0}")] SigningCoordinatorFailure(String), // The thread that we tried to send to has closed + #[error("The thread that we tried to send to has closed")] ChannelClosed, + /// DBError wrapper + #[error("DBError: {0}")] + DBError(#[from] DBError), 
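Deriving `thiserror::Error` with `#[from]` (as above for `ChainstateError` and `DBError`) is what lets the later hunks shorten explicit `map_err(NakamotoNodeError::MiningFailure)` chains to a bare `?` or `.into()`. A minimal sketch of the mechanism, with made-up error types standing in for the real ones; the `From` impl shown by hand is what the attribute expands to:

```rust
// Stand-ins for ChainstateError / NakamotoNodeError; thiserror generates
// equivalent Display and From impls from the attributes in the real code.
#[derive(Debug)]
struct ChainstateError(String);

#[derive(Debug)]
enum NodeError {
    MiningFailure(ChainstateError),
}

// This is roughly the impl that `#[from]` expands to.
impl From<ChainstateError> for NodeError {
    fn from(e: ChainstateError) -> Self {
        NodeError::MiningFailure(e)
    }
}

fn mine() -> Result<(), ChainstateError> {
    Err(ChainstateError("miner aborted".into()))
}

fn run() -> Result<(), NodeError> {
    mine()?; // `?` applies From<ChainstateError> automatically
    Ok(())
}

fn main() {
    assert!(matches!(run(), Err(NodeError::MiningFailure(_))));
}
```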
+ /// NetError wrapper + #[error("NetError: {0}")] + NetError(#[from] NetError), } impl StacksNode { @@ -131,7 +158,7 @@ impl StacksNode { .get_miner_address(StacksEpochId::Epoch21, &public_key); let miner_addr_str = addr2str(&miner_addr); let _ = monitoring::set_burnchain_signer(BurnchainSigner(miner_addr_str)).map_err(|e| { - warn!("Failed to set global burnchain signer: {:?}", &e); + warn!("Failed to set global burnchain signer: {e:?}"); e }); } @@ -148,7 +175,7 @@ impl StacksNode { let burnchain = runloop.get_burnchain(); let atlas_config = config.atlas.clone(); let mut keychain = Keychain::default(config.node.seed.clone()); - if let Some(mining_key) = config.miner.mining_key.clone() { + if let Some(mining_key) = config.miner.mining_key { keychain.set_nakamoto_sk(mining_key); } @@ -195,7 +222,7 @@ impl StacksNode { match &data_from_neon.leader_key_registration_state { LeaderKeyRegistrationState::Active(registered_key) => { let pubkey_hash = keychain.get_nakamoto_pkh(); - if pubkey_hash.as_ref() == ®istered_key.memo { + if pubkey_hash.as_ref() == registered_key.memo { data_from_neon.leader_key_registration_state } else { LeaderKeyRegistrationState::Inactive @@ -308,13 +335,13 @@ impl StacksNode { for op in block_commits.into_iter() { if op.txid == block_snapshot.winning_block_txid { info!( - "Received burnchain block #{} including block_commit_op (winning) - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash + "Received burnchain block #{block_height} including block_commit_op (winning) - {} ({})", + op.apparent_sender, &op.block_header_hash ); } else if self.is_miner { info!( - "Received burnchain block #{} including block_commit_op - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash + "Received burnchain block #{block_height} including block_commit_op - {} ({})", + op.apparent_sender, &op.block_header_hash ); } } @@ -359,25 +386,25 @@ impl StacksNode { } pub(crate) fn save_activated_vrf_key(path: &str, activated_key: &RegisteredKey) { - info!("Activated VRF key; saving to {}", path); + info!("Activated VRF key; saving to {path}"); let Ok(key_json) = serde_json::to_string(&activated_key) else { warn!("Failed to serialize VRF key"); return; }; - let mut f = match fs::File::create(&path) { + let mut f = match fs::File::create(path) { Ok(f) => f, Err(e) => { - warn!("Failed to create {}: {:?}", &path, &e); + warn!("Failed to create {path}: {e:?}"); return; } }; - if let Err(e) = f.write_all(key_json.as_str().as_bytes()) { - warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); + if let Err(e) = f.write_all(key_json.as_bytes()) { + warn!("Failed to write activated VRF key to {path}: {e:?}"); return; } - info!("Saved activated VRF key to {}", &path); + info!("Saved activated VRF key to {path}"); } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index d08fe9c25a..745ae03fc9 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -67,6 +67,7 @@ pub static TEST_SKIP_P2P_BROADCAST: std::sync::Mutex> = std::sync:: /// miner thread sleep before trying again? const ABORT_TRY_AGAIN_MS: u64 = 200; +#[allow(clippy::large_enum_variant)] pub enum MinerDirective { /// The miner won sortition so they should begin a new tenure BeginTenure { @@ -108,6 +109,8 @@ pub enum MinerReason { /// sortition. 
burn_view_consensus_hash: ConsensusHash, }, + /// The miner thread was spawned to initialize a prior empty tenure + EmptyTenure, } impl std::fmt::Display for MinerReason { @@ -120,6 +123,7 @@ impl std::fmt::Display for MinerReason { f, "Extended: burn_view_consensus_hash = {burn_view_consensus_hash:?}", ), + MinerReason::EmptyTenure => write!(f, "EmptyTenure"), } } } @@ -256,7 +260,7 @@ impl BlockMinerThread { globals.block_miner(); let prior_miner_result = prior_miner .join() - .map_err(|_| NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted))?; + .map_err(|_| ChainstateError::MinerAborted)?; if let Err(e) = prior_miner_result { // it's okay if the prior miner thread exited with an error. // in many cases this is expected (i.e., a burnchain block occurred) @@ -285,8 +289,8 @@ impl BlockMinerThread { if let Some(prior_miner) = prior_miner { Self::stop_miner(&self.globals, prior_miner)?; } - let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) - .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::NetError(e)))?; + let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true)?; + let mut last_block_rejected = false; // now, actually run this tenure loop { @@ -358,10 +362,8 @@ impl BlockMinerThread { // try again, in case a new sortition is pending self.globals - .raise_initiative(format!("MiningFailure: {:?}", &e)); - return Err(NakamotoNodeError::MiningFailure( - ChainstateError::MinerAborted, - )); + .raise_initiative(format!("MiningFailure: {e:?}")); + return Err(ChainstateError::MinerAborted.into()); } } }; @@ -390,15 +392,25 @@ impl BlockMinerThread { return Err(e); } _ => { - error!("Error while gathering signatures: {e:?}. Will try mining again."; + // Sleep for a bit to allow signers to catch up + let pause_ms = if last_block_rejected { + self.config.miner.subsequent_rejection_pause_ms + } else { + self.config.miner.first_rejection_pause_ms + }; + + error!("Error while gathering signatures: {e:?}. 
Will try mining again in {pause_ms}."; "signer_sighash" => %new_block.header.signer_signature_hash(), "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); + thread::sleep(Duration::from_millis(pause_ms)); + last_block_rejected = true; continue; } }, }; + last_block_rejected = false; new_block.header.signer_signature = signer_signature; if let Err(e) = self.broadcast(new_block.clone(), reward_set, &stackerdbs) { @@ -417,7 +429,7 @@ impl BlockMinerThread { // update mined-block counters and mined-tenure counters self.globals.counters.bump_naka_mined_blocks(); - if !self.last_block_mined.is_none() { + if self.last_block_mined.is_some() { // this is the first block of the tenure, bump tenure counter self.globals.counters.bump_naka_mined_tenures(); } @@ -546,6 +558,7 @@ impl BlockMinerThread { miner_privkey, &self.config, self.globals.should_keep_running.clone(), + self.event_dispatcher.stackerdb_channel.clone(), ) .map_err(|e| { NakamotoNodeError::SigningCoordinatorFailure(format!( @@ -566,12 +579,12 @@ impl BlockMinerThread { &self.burnchain, &sort_db, &mut chain_state, - &stackerdbs, + stackerdbs, &self.globals.counters, &self.burn_election_block.consensus_hash, )?; - return Ok((reward_set, signature)); + Ok((reward_set, signature)) } /// Fault injection -- possibly fail to broadcast @@ -583,13 +596,12 @@ impl BlockMinerThread { .fault_injection_block_push_fail_probability .unwrap_or(0) .min(100); - let will_drop = if drop_prob > 0 { + if drop_prob > 0 { let throw: u8 = thread_rng().gen_range(0..100); throw < drop_prob } else { false - }; - will_drop + } } /// Store a block to the chainstate, and if successful (it should be since we mined it), @@ -614,7 +626,7 @@ impl BlockMinerThread { let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?; let accepted = NakamotoChainState::accept_block( &chainstate_config, - &block, + block, &mut sortition_handle, &staging_tx, headers_conn, @@ -641,14 +653,14 @@ impl BlockMinerThread { } let block_id = block.block_id(); - debug!("Broadcasting block {}", &block_id); + debug!("Broadcasting block {block_id}"); if let Err(e) = self.p2p_handle.broadcast_message( vec![], StacksMessageType::NakamotoBlocks(NakamotoBlocksData { blocks: vec![block.clone()], }), ) { - warn!("Failed to broadcast block {}: {:?}", &block_id, &e); + warn!("Failed to broadcast block {block_id}: {e:?}"); } Ok(()) } @@ -697,7 +709,7 @@ impl BlockMinerThread { miner_privkey, &sort_db, &self.burn_block, - &stackerdbs, + stackerdbs, SignerMessage::BlockPushed(block), MinerSlotID::BlockPushed, chain_state.mainnet, @@ -794,7 +806,7 @@ impl BlockMinerThread { // load up stacks chain tip let (stacks_tip_ch, stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(burn_db.conn()).map_err(|e| { - error!("Failed to load canonical Stacks tip: {:?}", &e); + error!("Failed to load canonical Stacks tip: {e:?}"); NakamotoNodeError::ParentNotFound })?; @@ -806,8 +818,8 @@ impl BlockMinerThread { ) .map_err(|e| { error!( - "Could not query header info for tenure tip {} off of {}: {:?}", - &self.burn_election_block.consensus_hash, &stacks_tip_block_id, &e + "Could not query header info for tenure tip {} off of {stacks_tip_block_id}: {e:?}", + &self.burn_election_block.consensus_hash ); NakamotoNodeError::ParentNotFound })?; @@ -835,8 +847,8 @@ impl BlockMinerThread { NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) .map_err(|e| { error!( - "Could not query header for parent tenure ID {}: 
{:?}", - &self.parent_tenure_id, &e + "Could not query header for parent tenure ID {}: {e:?}", + &self.parent_tenure_id ); NakamotoNodeError::ParentNotFound })? @@ -851,7 +863,7 @@ impl BlockMinerThread { &parent_tenure_header.consensus_hash, ) .map_err(|e| { - error!("Could not query parent tenure finish block: {:?}", &e); + error!("Could not query parent tenure finish block: {e:?}"); NakamotoNodeError::ParentNotFound })?; if let Some(header) = header_opt { @@ -862,31 +874,27 @@ impl BlockMinerThread { "Stacks block parent ID may be an epoch2x block: {}", &self.parent_tenure_id ); - let epoch2_header = - NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) - .map_err(|e| { - error!( - "Could not query header info for epoch2x tenure block ID {}: {:?}", - &self.parent_tenure_id, &e - ); - NakamotoNodeError::ParentNotFound - })? - .ok_or_else(|| { - error!( - "No header info for epoch2x tenure block ID {}", - &self.parent_tenure_id - ); - NakamotoNodeError::ParentNotFound - })?; - - epoch2_header + NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) + .map_err(|e| { + error!( + "Could not query header info for epoch2x tenure block ID {}: {e:?}", + &self.parent_tenure_id + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!( + "No header info for epoch2x tenure block ID {}", + &self.parent_tenure_id + ); + NakamotoNodeError::ParentNotFound + })? } }; debug!( - "Miner: stacks tip parent header is {} {:?}", - &stacks_tip_header.index_block_hash(), - &stacks_tip_header + "Miner: stacks tip parent header is {} {stacks_tip_header:?}", + &stacks_tip_header.index_block_hash() ); let miner_address = self .keychain @@ -918,19 +926,19 @@ impl BlockMinerThread { let vrf_proof = if self.config.get_node_config(false).mock_mining { self.keychain.generate_proof( VRF_MOCK_MINER_KEY, - self.burn_block.sortition_hash.as_bytes(), + self.burn_election_block.sortition_hash.as_bytes(), ) } else { self.keychain.generate_proof( self.registered_key.target_block_height, - self.burn_block.sortition_hash.as_bytes(), + self.burn_election_block.sortition_hash.as_bytes(), ) }; debug!( "Generated VRF Proof: {} over {} ({},{}) with key {}", vrf_proof.to_hex(), - &self.burn_block.sortition_hash, + &self.burn_election_block.sortition_hash, &self.burn_block.block_height, &self.burn_block.burn_header_hash, &self.registered_key.vrf_public_key.to_hex() @@ -970,8 +978,8 @@ impl BlockMinerThread { NakamotoChainState::get_block_header(chain_state.db(), &x.header.parent_block_id) .map_err(|e| { error!( - "Could not query header info for parent block ID {}: {:?}", - &x.header.parent_block_id, &e + "Could not query header info for parent block ID {}: {e:?}", + &x.header.parent_block_id ); NakamotoNodeError::ParentNotFound })? @@ -1045,9 +1053,7 @@ impl BlockMinerThread { ) { // treat a too-soon-to-mine block as an interrupt: this will let the caller sleep and then re-evaluate // all the pre-mining checks (burnchain tip changes, signal interrupts, etc.) 
- return Err(NakamotoNodeError::MiningFailure( - ChainstateError::MinerAborted, - )); + return Err(ChainstateError::MinerAborted.into()); } // build the block itself @@ -1075,13 +1081,11 @@ impl BlockMinerThread { ) { error!("Relayer: Failure mining anchored block: {e}"); } - NakamotoNodeError::MiningFailure(e) + e })?; if block.txs.is_empty() { - return Err(NakamotoNodeError::MiningFailure( - ChainstateError::NoTransactionsToMine, - )); + return Err(ChainstateError::NoTransactionsToMine.into()); } let mining_key = self.keychain.get_nakamoto_sk(); let miner_signature = mining_key @@ -1140,9 +1144,9 @@ impl BlockMinerThread { let parent_block_id = parent_block_info.stacks_parent_header.index_block_hash(); let mut payload = TenureChangePayload { - tenure_consensus_hash: self.burn_election_block.consensus_hash.clone(), + tenure_consensus_hash: self.burn_election_block.consensus_hash, prev_tenure_consensus_hash: parent_tenure_info.parent_tenure_consensus_hash, - burn_view_consensus_hash: self.burn_election_block.consensus_hash.clone(), + burn_view_consensus_hash: self.burn_election_block.consensus_hash, previous_tenure_end: parent_block_id, previous_tenure_blocks: u32::try_from(parent_tenure_info.parent_tenure_blocks) .expect("FATAL: more than u32 blocks in a tenure"), @@ -1151,7 +1155,7 @@ impl BlockMinerThread { }; let (tenure_change_tx, coinbase_tx) = match &self.reason { - MinerReason::BlockFound => { + MinerReason::BlockFound | MinerReason::EmptyTenure => { let tenure_change_tx = self.generate_tenure_change_tx(current_miner_nonce, payload)?; let coinbase_tx = @@ -1245,7 +1249,7 @@ impl ParentStacksBlockInfo { } let Ok(Some(parent_tenure_header)) = - NakamotoChainState::get_block_header(chain_state.db(), &parent_tenure_id) + NakamotoChainState::get_block_header(chain_state.db(), parent_tenure_id) else { warn!("Failed loading parent tenure ID"; "parent_tenure_id" => %parent_tenure_id); return Err(NakamotoNodeError::ParentNotFound); @@ -1286,7 +1290,7 @@ impl ParentStacksBlockInfo { } else { 1 }; - let parent_tenure_consensus_hash = parent_tenure_header.consensus_hash.clone(); + let parent_tenure_consensus_hash = parent_tenure_header.consensus_hash; Some(ParentTenureInfo { parent_tenure_blocks, parent_tenure_consensus_hash, @@ -1314,7 +1318,7 @@ impl ParentStacksBlockInfo { let account = chain_state .with_read_only_clarity_tx( &burn_db - .index_handle_at_block(&chain_state, &stacks_tip_header.index_block_hash()) + .index_handle_at_block(chain_state, &stacks_tip_header.index_block_hash()) .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, &stacks_tip_header.index_block_hash(), |conn| StacksChainState::get_account(conn, &principal), diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index 004023ea26..3c4e6a98f4 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::VecDeque; use std::net::SocketAddr; use std::sync::mpsc::TrySendError; use std::thread; @@ -54,11 +53,9 @@ pub struct PeerThread { chainstate: StacksChainState, /// handle to the mempool DB mempool: MemPoolDB, - /// buffer of relayer commands with block data that couldn't be sent to the relayer just yet - /// (i.e. due to backpressure). 
We track this separately, instead of just using a bigger - /// channel, because we need to know when backpressure occurs in order to throttle the p2p - /// thread's downloader. - results_with_data: VecDeque<RelayerDirective>, + /// Buffered network result relayer command. + /// P2P network results are consolidated into a single directive. + results_with_data: Option<RelayerDirective>, /// total number of p2p state-machine passes so far. Used to signal when to download the next /// reward cycle of blocks num_p2p_state_machine_passes: u64, @@ -199,7 +196,7 @@ sortdb, chainstate, mempool, - results_with_data: VecDeque::new(), + results_with_data: None, num_p2p_state_machine_passes: 0, num_inv_sync_passes: 0, num_download_passes: 0, @@ -227,6 +224,7 @@ /// Run one pass of the p2p/http state machine /// Return true if we should continue running passes; false if not + #[allow(clippy::borrowed_box)] pub(crate) fn run_one_pass<B: BurnchainHeaderReader>( &mut self, indexer: &B, ) -> bool { // initial block download? let ibd = self.globals.sync_comms.get_ibd(); - let download_backpressure = self.results_with_data.len() > 0; + let download_backpressure = self + .results_with_data + .as_ref() + .map(|res| { + if let RelayerDirective::HandleNetResult(netres) = &res { + netres.has_block_data_to_store() + } else { + false + } + }) + .unwrap_or(false); + let poll_ms = if !download_backpressure && self.net.has_more_downloads() { // keep getting those blocks -- drive the downloader state-machine debug!( - "P2P: backpressure: {}, more downloads: {}", - download_backpressure, + "P2P: backpressure: {download_backpressure}, more downloads: {}", self.net.has_more_downloads() ); 1 @@ -258,7 +266,7 @@ // NOTE: handler_args must be created such that it outlives the inner net.run() call and // doesn't ref anything within p2p_thread. let handler_args = RPCHandlerArgs { - exit_at_block_height: self.config.burnchain.process_exit_at_block_height.clone(), + exit_at_block_height: self.config.burnchain.process_exit_at_block_height, genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) .unwrap(), event_observer: Some(event_dispatcher), @@ -266,7 +274,6 @@ cost_metric: Some(cost_metric.as_ref()), fee_estimator: fee_estimator.map(|boxed_estimator| boxed_estimator.as_ref()), coord_comms: Some(&self.globals.coord_comms), - ..RPCHandlerArgs::default() }; self.net.run( indexer, @@ -282,7 +289,6 @@ }; match p2p_res { Ok(network_result) => { - let mut have_update = false; if self.num_p2p_state_machine_passes < network_result.num_state_machine_passes { // p2p state-machine did a full pass. Notify anyone listening. self.globals.sync_comms.notify_p2p_state_pass(); @@ -293,52 +299,51 @@ // inv-sync state-machine did a full pass. Notify anyone listening. self.globals.sync_comms.notify_inv_sync_pass(); self.num_inv_sync_passes = network_result.num_inv_sync_passes; - - // the relayer cares about the number of inventory passes, so pass this along - have_update = true; } if self.num_download_passes < network_result.num_download_passes { // download state-machine did a full pass. Notify anyone listening.
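The `PeerThread` changes above replace the old `VecDeque<RelayerDirective>` buffer with a single `Option<RelayerDirective>`: the hunk just below folds each freshly produced `NetworkResult` into whatever is already buffered (the `netres.update(network_result)` call), so backpressure consolidates results instead of queueing them. A simplified model of that fold, with a stand-in type in place of the node's real `NetworkResult` (the `blocks` field is an assumption for illustration):

    // Stand-in for the node's NetworkResult; illustrative only.
    #[derive(Debug, PartialEq)]
    struct NetResult {
        blocks: Vec<u64>,
    }

    impl NetResult {
        /// Fold a newer result into this one, keeping all buffered block data.
        fn update(mut self, mut newer: NetResult) -> NetResult {
            self.blocks.append(&mut newer.blocks);
            self
        }
    }

    struct Peer {
        /// At most one pending directive; replaces the old VecDeque.
        pending: Option<NetResult>,
    }

    impl Peer {
        fn buffer(&mut self, incoming: NetResult) {
            self.pending = Some(match self.pending.take() {
                Some(prev) => prev.update(incoming),
                None => incoming,
            });
        }
    }

    fn main() {
        let mut peer = Peer { pending: None };
        peer.buffer(NetResult { blocks: vec![1] });
        peer.buffer(NetResult { blocks: vec![2, 3] });
        // Two state-machine passes, one consolidated directive.
        assert_eq!(peer.pending, Some(NetResult { blocks: vec![1, 2, 3] }));
    }

Consolidating rather than queueing keeps memory bounded while still never dropping block data, and a simple check on the single buffered entry is enough to detect backpressure.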
self.globals.sync_comms.notify_download_pass(); self.num_download_passes = network_result.num_download_passes; - - // the relayer cares about the number of download passes, so pass this along - have_update = true; } - if network_result.has_data_to_store() - || self.last_burn_block_height != network_result.burn_height - || have_update - { - // pass along if we have blocks, microblocks, or transactions, or a status - // update on the network's view of the burnchain - self.last_burn_block_height = network_result.burn_height; - self.results_with_data - .push_back(RelayerDirective::HandleNetResult(network_result)); + self.last_burn_block_height = network_result.burn_height; + if let Some(res) = self.results_with_data.take() { + if let RelayerDirective::HandleNetResult(netres) = res { + let new_res = netres.update(network_result); + self.results_with_data = Some(RelayerDirective::HandleNetResult(new_res)); + } + } else { + self.results_with_data = + Some(RelayerDirective::HandleNetResult(network_result)); } + + self.globals.raise_initiative( + "PeerThread::run_one_pass() with data-bearing network result".to_string(), + ); } Err(e) => { // this is only reachable if the network is not instantiated correctly -- // i.e. you didn't connect it - panic!("P2P: Failed to process network dispatch: {:?}", &e); + panic!("P2P: Failed to process network dispatch: {e:?}"); } }; - while let Some(next_result) = self.results_with_data.pop_front() { + if let Some(next_result) = self.results_with_data.take() { // have blocks, microblocks, and/or transactions (don't care about anything else), // or a directive to mine microblocks + self.globals.raise_initiative( + "PeerThread::run_one_pass() with backlogged network results".to_string(), + ); if let Err(e) = self.globals.relay_send.try_send(next_result) { debug!( - "P2P: {:?}: download backpressure detected (bufferred {})", + "P2P: {:?}: download backpressure detected", &self.net.local_peer, - self.results_with_data.len() ); match e { TrySendError::Full(directive) => { // don't lose this data -- just try it again - self.results_with_data.push_front(directive); - break; + self.results_with_data = Some(directive); } TrySendError::Disconnected(_) => { info!("P2P: Relayer hang up with p2p channel"); @@ -347,13 +352,7 @@ impl PeerThread { } } } else { - debug!( - "P2P: Dispatched result to Relayer! 
{} results remaining", - self.results_with_data.len() - ); - self.globals.raise_initiative( - "PeerThread::run_one_pass() with data-bearing network result".to_string(), - ); + debug!("P2P: Dispatched result to Relayer!",); } } diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index ef01f67f4b..b346cdc346 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -31,7 +31,7 @@ use stacks::chainstate::burn::operations::{ }; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; -use stacks::chainstate::nakamoto::NakamotoChainState; +use stacks::chainstate::nakamoto::{NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{ @@ -67,6 +67,7 @@ use crate::run_loop::RegisteredKey; use crate::BitcoinRegtestController; /// Command types for the Nakamoto relayer thread, issued to it by other threads +#[allow(clippy::large_enum_variant)] pub enum RelayerDirective { /// Handle some new data that arrived on the network (such as blocks, transactions, and HandleNetResult(NetworkResult), @@ -142,7 +143,7 @@ impl LastCommit { /// What's the parent tenure's tenure-start block hash? pub fn parent_tenure_id(&self) -> StacksBlockId { - StacksBlockId(self.block_commit.block_header_hash.clone().0) + StacksBlockId(self.block_commit.block_header_hash.0) } /// What's the stacks tip at the time of commit? @@ -167,7 +168,7 @@ impl LastCommit { /// Set our txid pub fn set_txid(&mut self, txid: &Txid) { - self.txid = Some(txid.clone()); + self.txid = Some(*txid); } } @@ -235,6 +236,8 @@ pub struct RelayerThread { /// Information about the last-sent block commit, and the relayer's view of the chain at the /// time it was sent. last_committed: Option, + /// Timeout for waiting for the first block in a tenure before submitting a block commit + new_tenure_timeout: Option, } impl RelayerThread { @@ -292,6 +295,7 @@ impl RelayerThread { is_miner, next_initiative: Instant::now() + Duration::from_millis(next_initiative_delay), last_committed: None, + new_tenure_timeout: None, } } @@ -304,9 +308,7 @@ impl RelayerThread { /// chain tip? fn has_waited_for_latest_blocks(&self) -> bool { // a network download pass took place - (self.min_network_download_passes <= self.last_network_download_passes - // a network inv pass took place - && self.min_network_download_passes <= self.last_network_download_passes) + self.min_network_download_passes <= self.last_network_download_passes // we waited long enough for a download pass, but timed out waiting || self.last_network_block_height_ts + (self.config.node.wait_time_for_blocks as u128) < get_epoch_time_ms() // we're not supposed to wait at all @@ -380,20 +382,50 @@ impl RelayerThread { /// parent block could be an epoch 2 block. In this case, the right thing to do is to wait for /// the next block-commit. 
pub(crate) fn choose_miner_directive( - config: &Config, - sortdb: &SortitionDB, + &self, sn: BlockSnapshot, won_sortition: bool, committed_index_hash: StacksBlockId, ) -> Option<MinerDirective> { + let (cur_stacks_tip_ch, cur_stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) + .expect("FATAL: failed to query sortition DB for stacks tip"); + + let stacks_tip = StacksBlockId::new(&cur_stacks_tip_ch, &cur_stacks_tip_bh); + let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( + &mut self.chainstate.index_conn(), + &stacks_tip, + &cur_stacks_tip_ch, + ) + .unwrap_or_else(|e| { + panic!("Relayer: Failed to get tenure-start block header for stacks tip {stacks_tip}: {e:?}") + }) + .unwrap_or_else(|| { + panic!("Relayer: Failed to find tenure-start block header for stacks tip {stacks_tip}") + }); + let directive = if sn.sortition { Some( - if won_sortition || config.get_node_config(false).mock_mining { + if won_sortition || self.config.get_node_config(false).mock_mining { + info!("Relayer: Won sortition; begin tenure."); MinerDirective::BeginTenure { parent_tenure_start: committed_index_hash, burnchain_tip: sn, } + } else if committed_index_hash + != highest_tenure_start_block_header.index_block_hash() + { + info!( + "Relayer: Winner of sortition {} did not commit to the correct parent tenure. Attempt to continue tenure.", + &sn.consensus_hash + ); + // We didn't win the sortition, but the miner that did win + // did not commit to the correct parent tenure. This means + // it will be unable to produce a valid block, so we should + // continue our tenure. + MinerDirective::ContinueTenure { + new_burn_view: sn.consensus_hash, + } } else { + info!("Relayer: Stop tenure"); MinerDirective::StopTenure }, ) @@ -402,16 +434,16 @@ // If it's in epoch 2.x, then we must always begin a new tenure, but we can't do so // right now since this sortition has no winner.
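Together with the no-sortition branch that continues just below, the reworked `choose_miner_directive` reduces to a small decision table. A simplified sketch with a stand-in enum (`won` folds in mock mining, and the sortition-DB epoch check the real code performs is reduced to a boolean here):

    // Stand-in for the node's MinerDirective; illustrative only.
    #[derive(Debug, PartialEq)]
    enum Directive {
        BeginTenure,
        ContinueTenure,
        StopTenure,
    }

    /// `winner_ok`: the sortition winner committed to the canonical tip's
    /// tenure-start block. `epoch_30`: the stacks tip is already in Nakamoto.
    fn choose(sortition: bool, won: bool, winner_ok: bool, epoch_30: bool) -> Option<Directive> {
        if sortition {
            Some(if won {
                Directive::BeginTenure
            } else if !winner_ok {
                // The winner can never produce a valid block; keep mining.
                Directive::ContinueTenure
            } else {
                Directive::StopTenure
            })
        } else if epoch_30 {
            Some(Directive::ContinueTenure)
        } else {
            // Epoch 2.x with no winner: wait for the next sortition.
            None
        }
    }

    fn main() {
        // A winner that committed to the wrong parent tenure lets us continue.
        assert_eq!(choose(true, false, false, true), Some(Directive::ContinueTenure));
        // An empty sortition before epoch 3.0 yields no directive at all.
        assert_eq!(choose(false, false, true, false), None);
    }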
let (cur_stacks_tip_ch, _cur_stacks_tip_bh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()) + SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for stacks tip"); let stacks_tip_sn = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &cur_stacks_tip_ch) + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &cur_stacks_tip_ch) .expect("FATAL: failed to query sortition DB for epoch") .expect("FATAL: no sortition for canonical stacks tip"); let cur_epoch = - SortitionDB::get_stacks_epoch(sortdb.conn(), stacks_tip_sn.block_height) + SortitionDB::get_stacks_epoch(self.sortdb.conn(), stacks_tip_sn.block_height) .expect("FATAL: failed to query sortition DB for epoch") .expect("FATAL: no epoch defined for existing sortition"); @@ -422,6 +454,7 @@ ); None } else { + info!("Relayer: No sortition; continue tenure."); Some(MinerDirective::ContinueTenure { new_burn_view: sn.consensus_hash, }) @@ -472,19 +505,13 @@ .expect("FATAL: failed to query sortition DB"); if cur_sn.consensus_hash != consensus_hash { - info!("Relayer: Current sortition {} is ahead of processed sortition {}; taking no action", &cur_sn.consensus_hash, consensus_hash); + info!("Relayer: Current sortition {} is ahead of processed sortition {consensus_hash}; taking no action", &cur_sn.consensus_hash); self.globals .raise_initiative("process_sortition".to_string()); return Ok(None); } - let directive_opt = Self::choose_miner_directive( - &self.config, - &self.sortdb, - sn, - won_sortition, - committed_index_hash, - ); + let directive_opt = self.choose_miner_directive(sn, won_sortition, committed_index_hash); Ok(directive_opt) } @@ -497,7 +524,7 @@ BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { public_key: vrf_public_key, memo: miner_pkh.as_bytes().to_vec(), - consensus_hash: consensus_hash.clone(), + consensus_hash: *consensus_hash, vtxindex: 0, txid: Txid([0u8; 32]), block_height: 0, @@ -555,6 +582,7 @@ tip_block_ch: &ConsensusHash, tip_block_bh: &BlockHeaderHash, ) -> Result<LastCommit, NakamotoNodeError> { + let tip_block_id = StacksBlockId::new(&tip_block_ch, &tip_block_bh); let sort_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .map_err(|_| NakamotoNodeError::SnapshotNotFoundForChainTip)?; @@ -564,19 +592,17 @@ let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( &mut self.chainstate.index_conn(), &stacks_tip, - &tip_block_ch, + tip_block_ch, ) .map_err(|e| { error!( - "Relayer: Failed to get tenure-start block header for stacks tip {}: {:?}", - &stacks_tip, &e + "Relayer: Failed to get tenure-start block header for stacks tip {stacks_tip}: {e:?}" ); NakamotoNodeError::ParentNotFound })? .ok_or_else(|| { error!( - "Relayer: Failed to find tenure-start block header for stacks tip {}", - &stacks_tip + "Relayer: Failed to find tenure-start block header for stacks tip {stacks_tip}" ); NakamotoNodeError::ParentNotFound })?; @@ -589,17 +615,11 @@ tip_block_ch, ) .map_err(|e| { - error!( - "Failed to load VRF proof for {} off of {}: {:?}", - tip_block_ch, &stacks_tip, &e - ); + error!("Failed to load VRF proof for {tip_block_ch} off of {stacks_tip}: {e:?}"); NakamotoNodeError::ParentNotFound })?
.ok_or_else(|| { - error!( - "No block VRF proof for {} off of {}", - tip_block_ch, &stacks_tip - ); + error!("No block VRF proof for {tip_block_ch} off of {stacks_tip}"); NakamotoNodeError::ParentNotFound })?; @@ -612,7 +632,7 @@ impl RelayerThread { &self.burnchain, ) .map_err(|e| { - error!("Relayer: Failure fetching recipient set: {:?}", e); + error!("Relayer: Failure fetching recipient set: {e:?}"); NakamotoNodeError::SnapshotNotFoundForChainTip })?; @@ -636,18 +656,41 @@ impl RelayerThread { return Err(NakamotoNodeError::ParentNotFound); }; - // find the parent block-commit of this commit + // find the parent block-commit of this commit, so we can find the parent vtxindex + // if the parent is a shadow block, then the vtxindex would be 0. let commit_parent_block_burn_height = tip_tenure_sortition.block_height; - let Ok(Some(parent_winning_tx)) = SortitionDB::get_block_commit( - self.sortdb.conn(), - &tip_tenure_sortition.winning_block_txid, - &tip_tenure_sortition.sortition_id, - ) else { - error!("Relayer: Failed to lookup the block commit of parent tenure ID"; "tenure_consensus_hash" => %tip_block_ch); - return Err(NakamotoNodeError::SnapshotNotFoundForChainTip); - }; + let commit_parent_winning_vtxindex = if let Ok(Some(parent_winning_tx)) = + SortitionDB::get_block_commit( + self.sortdb.conn(), + &tip_tenure_sortition.winning_block_txid, + &tip_tenure_sortition.sortition_id, + ) { + parent_winning_tx.vtxindex + } else { + debug!( + "{}/{} ({}) must be a shadow block, since it has no block-commit", + &tip_block_bh, &tip_block_ch, &tip_block_id + ); + let Ok(Some(parent_version)) = + NakamotoChainState::get_nakamoto_block_version(self.chainstate.db(), &tip_block_id) + else { + error!( + "Relayer: Failed to lookup block version of {}", + &tip_block_id + ); + return Err(NakamotoNodeError::ParentNotFound); + }; - let commit_parent_winning_vtxindex = parent_winning_tx.vtxindex; + if !NakamotoBlockHeader::is_shadow_block_version(parent_version) { + error!( + "Relayer: parent block-commit of {} not found, and it is not a shadow block", + &tip_block_id + ); + return Err(NakamotoNodeError::ParentNotFound); + } + + 0 + }; // epoch in which this commit will be sent (affects how the burnchain client processes it) let Ok(Some(target_epoch)) = @@ -730,9 +773,7 @@ impl RelayerThread { /// * last_burn_block corresponds to the canonical sortition DB's chain tip /// * the time of issuance is sufficiently recent /// * there are no unprocessed stacks blocks in the staging DB - /// * the relayer has already tried a download scan that included this sortition (which, if a - /// block was found, would have placed it into the staging DB and marked it as - /// unprocessed) + /// * the relayer has already tried a download scan that included this sortition (which, if a block was found, would have placed it into the staging DB and marked it as unprocessed) /// * a miner thread is not running already fn create_block_miner( &mut self, @@ -750,16 +791,15 @@ impl RelayerThread { return Err(NakamotoNodeError::FaultInjection); } - let burn_header_hash = burn_tip.burn_header_hash.clone(); + let burn_header_hash = burn_tip.burn_header_hash; let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); + let burn_chain_tip = burn_chain_sn.burn_header_hash; if burn_chain_tip != burn_header_hash { debug!( - "Relayer: Drop stale RunTenure for {}: current sortition 
is for {}", - &burn_header_hash, &burn_chain_tip + "Relayer: Drop stale RunTenure for {burn_header_hash}: current sortition is for {burn_chain_tip}" ); self.globals.counters.bump_missed_tenures(); return Err(NakamotoNodeError::MissedMiningOpportunity); @@ -819,14 +859,14 @@ impl RelayerThread { .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { if let Err(e) = new_miner_state.run_miner(prior_tenure_thread) { - info!("Miner thread failed: {:?}", &e); + info!("Miner thread failed: {e:?}"); Err(e) } else { Ok(()) } }) .map_err(|e| { - error!("Relayer: Failed to start tenure thread: {:?}", &e); + error!("Relayer: Failed to start tenure thread: {e:?}"); NakamotoNodeError::SpawnError(e) })?; debug!( @@ -852,7 +892,7 @@ impl RelayerThread { .name(format!("tenure-stop-{}", self.local_peer.data_url)) .spawn(move || BlockMinerThread::stop_miner(&globals, prior_tenure_thread)) .map_err(|e| { - error!("Relayer: Failed to spawn a stop-tenure thread: {:?}", &e); + error!("Relayer: Failed to spawn a stop-tenure thread: {e:?}"); NakamotoNodeError::SpawnError(e) })?; @@ -861,83 +901,187 @@ impl RelayerThread { Ok(()) } + /// Get the public key hash for the mining key. + fn get_mining_key_pkh(&self) -> Option { + let Some(ref mining_key) = self.config.miner.mining_key else { + return None; + }; + Some(Hash160::from_node_public_key( + &StacksPublicKey::from_private(mining_key), + )) + } + + /// Get the tenure-start block header hash of a given consensus hash. + /// For Nakamoto blocks, this is the first block in the tenure identified by the consensus + /// hash. + /// For epoch2 blocks, this is simply the block whose winning sortition happened in the + /// sortition identified by the consensus hash. + /// + /// `tip_block_id` is the chain tip from which to perform the query. + fn get_tenure_bhh( + &self, + tip_block_id: &StacksBlockId, + ch: &ConsensusHash, + ) -> Result { + let highest_tenure_start_block_header = NakamotoChainState::get_tenure_start_block_header( + &mut self.chainstate.index_conn(), + tip_block_id, + &ch, + )? + .ok_or_else(|| { + error!( + "Relayer: Failed to find tenure-start block header for stacks tip {tip_block_id}" + ); + NakamotoNodeError::ParentNotFound + })?; + Ok(BlockHeaderHash( + highest_tenure_start_block_header.index_block_hash().0, + )) + } + + /// Determine the type of tenure change to issue based on whether this + /// miner was the last successful miner (miner of the canonical tip). + fn determine_tenure_type( + &self, + canonical_snapshot: BlockSnapshot, + last_snapshot: BlockSnapshot, + new_burn_view: ConsensusHash, + mining_pkh: Hash160, + ) -> (StacksBlockId, BlockSnapshot, MinerReason) { + if canonical_snapshot.miner_pk_hash != Some(mining_pkh) { + debug!("Relayer: Miner was not the last successful miner. Issue a new tenure change payload."); + ( + StacksBlockId(last_snapshot.winning_stacks_block_hash.0), + last_snapshot, + MinerReason::EmptyTenure, + ) + } else { + debug!("Relayer: Miner was the last successful miner. Issue a tenure extend from the chain tip."); + ( + self.sortdb.get_canonical_stacks_tip_block_id(), + canonical_snapshot, + MinerReason::Extended { + burn_view_consensus_hash: new_burn_view, + }, + ) + } + } + + /// Get the block snapshot of the most recent sortition that committed to + /// the canonical tip. If the latest sortition did not commit to the + /// canonical tip, then the tip's tenure is the last good sortition. 
+ fn get_last_good_block_snapshot( + &self, + burn_tip: &BlockSnapshot, + highest_tenure_bhh: &BlockHeaderHash, + canonical_stacks_tip_ch: &ConsensusHash, + ) -> Result<BlockSnapshot, NakamotoNodeError> { + let ih = self.sortdb.index_handle(&burn_tip.sortition_id); + let sn = ih + .get_last_snapshot_with_sortition(burn_tip.block_height) + .map_err(|e| { + error!("Relayer: failed to get last snapshot with sortition: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + if &sn.winning_stacks_block_hash != highest_tenure_bhh { + info!( + "Relayer: Sortition winner is not committed to the canonical tip; allowing last miner to extend"; + "burn_block_height" => burn_tip.block_height, + "consensus_hash" => %burn_tip.consensus_hash, + ); + + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), canonical_stacks_tip_ch) + .map_err(|e| { + error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })? + .ok_or_else(|| { + error!("Relayer: failed to get block snapshot for canonical tip"); + NakamotoNodeError::SnapshotNotFoundForChainTip + }) + } else { + Ok(sn) + } + } + + /// Attempt to continue a miner's tenure into the next burn block. + /// This is allowed if the miner won the last good sortition and one of the + /// following conditions is met: + /// - There was no sortition in the latest burn block + /// - The winner of the latest sortition did not commit to the canonical tip + /// - The winner of the latest sortition did not mine any blocks within the + /// timeout period (not yet implemented) fn continue_tenure(&mut self, new_burn_view: ConsensusHash) -> Result<(), NakamotoNodeError> { if let Err(e) = self.stop_tenure() { error!("Relayer: Failed to stop tenure: {e:?}"); return Ok(()); } debug!("Relayer: successfully stopped tenure."); - // Check if we should undergo a tenure change to switch to the new burn view + + // Get the necessary snapshots and state let burn_tip = - SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &new_burn_view) - .map_err(|e| { - error!("Relayer: failed to get block snapshot for new burn view: {e:?}"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })? + SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &new_burn_view)? .ok_or_else(|| { error!("Relayer: failed to get block snapshot for new burn view"); NakamotoNodeError::SnapshotNotFoundForChainTip })?; - let (canonical_stacks_tip_ch, canonical_stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap(); let canonical_stacks_tip = StacksBlockId::new(&canonical_stacks_tip_ch, &canonical_stacks_tip_bh); - let block_election_snapshot = - SortitionDB::get_block_snapshot_consensus(self.sortdb.conn(), &canonical_stacks_tip_ch) - .map_err(|e| { - error!("Relayer: failed to get block snapshot for canonical tip: {e:?}"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })?
- .ok_or_else(|| { - error!("Relayer: failed to get block snapshot for canonical tip"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })?; - - let Some(ref mining_key) = self.config.miner.mining_key else { + let Some(mining_pkh) = self.get_mining_key_pkh() else { return Ok(()); }; - let mining_pkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(mining_key)); - - let last_winner_snapshot = { - let ih = self.sortdb.index_handle(&burn_tip.sortition_id); - ih.get_last_snapshot_with_sortition(burn_tip.block_height) - .map_err(|e| { - error!("Relayer: failed to get last snapshot with sortition: {e:?}"); - NakamotoNodeError::SnapshotNotFoundForChainTip - })? - }; + let highest_tenure_bhh = + self.get_tenure_bhh(&canonical_stacks_tip, &canonical_stacks_tip_ch)?; + let last_good_block_election_snapshot = self.get_last_good_block_snapshot( + &burn_tip, + &highest_tenure_bhh, + &canonical_stacks_tip_ch, + )?; - let won_last_sortition = last_winner_snapshot.miner_pk_hash == Some(mining_pkh); - debug!( - "Relayer: Current burn block had no sortition. Checking for tenure continuation."; + let won_last_sortition = + last_good_block_election_snapshot.miner_pk_hash == Some(mining_pkh); + info!( + "Relayer: Current burn block had no sortition or a bad sortition. Checking for tenure continuation."; "won_last_sortition" => won_last_sortition, "current_mining_pkh" => %mining_pkh, - "last_winner_snapshot.miner_pk_hash" => ?last_winner_snapshot.miner_pk_hash, + "last_good_block_election_snapshot.consensus_hash" => %last_good_block_election_snapshot.consensus_hash, + "last_good_block_election_snapshot.miner_pk_hash" => ?last_good_block_election_snapshot.miner_pk_hash, "canonical_stacks_tip_id" => %canonical_stacks_tip, "canonical_stacks_tip_ch" => %canonical_stacks_tip_ch, - "block_election_ch" => %block_election_snapshot.consensus_hash, "burn_view_ch" => %new_burn_view, ); if !won_last_sortition { + info!("Relayer: Did not win the last sortition. Cannot continue tenure."); return Ok(()); } - match self.start_new_tenure( - canonical_stacks_tip, // For tenure extend, we should be extending off the canonical tip + let canonical_snapshot = SortitionDB::get_block_snapshot_consensus( + self.sortdb.conn(), + &canonical_stacks_tip_ch, + )? 
+ .ok_or_else(|| { + error!("Relayer: failed to get block snapshot for canonical tip"); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + let (parent_tenure_start, block_election_snapshot, reason) = self.determine_tenure_type( + canonical_snapshot, + last_good_block_election_snapshot, + new_burn_view, + mining_pkh, + ); + + if let Err(e) = self.start_new_tenure( + parent_tenure_start, block_election_snapshot, burn_tip, - MinerReason::Extended { - burn_view_consensus_hash: new_burn_view, - }, + reason, ) { - Ok(()) => { - debug!("Relayer: successfully started new tenure."); - } - Err(e) => { - error!("Relayer: Failed to start new tenure: {e:?}"); - } + error!("Relayer: Failed to start new tenure: {e:?}"); + } else { + debug!("Relayer: successfully started new tenure."); } Ok(()) } @@ -955,7 +1099,7 @@ impl RelayerThread { return true; } Err(e) => { - warn!("Relayer: process_sortition returned {:?}", &e); + warn!("Relayer: process_sortition returned {e:?}"); return false; } }; @@ -1003,13 +1147,7 @@ impl RelayerThread { #[cfg(test)] fn fault_injection_skip_block_commit(&self) -> bool { - self.globals - .counters - .naka_skip_commit_op - .0 - .lock() - .unwrap() - .unwrap_or(false) + self.globals.counters.naka_skip_commit_op.get() } #[cfg(not(test))] @@ -1023,24 +1161,23 @@ impl RelayerThread { tip_block_ch: ConsensusHash, tip_block_bh: BlockHeaderHash, ) -> Result<(), NakamotoNodeError> { - let mut last_committed = self.make_block_commit(&tip_block_ch, &tip_block_bh)?; if self.fault_injection_skip_block_commit() { warn!("Relayer: not submitting block-commit to bitcoin network due to test directive."); return Ok(()); } + let mut last_committed = self.make_block_commit(&tip_block_ch, &tip_block_bh)?; // last chance -- is this still the stacks tip? let (cur_stacks_tip_ch, cur_stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap_or_else( |e| { - panic!("Failed to load canonical stacks tip: {:?}", &e); + panic!("Failed to load canonical stacks tip: {e:?}"); }, ); if cur_stacks_tip_ch != tip_block_ch || cur_stacks_tip_bh != tip_block_bh { info!( - "Stacks tip changed prior to commit: {}/{} != {}/{}", - &cur_stacks_tip_ch, &cur_stacks_tip_bh, &tip_block_ch, &tip_block_bh + "Stacks tip changed prior to commit: {cur_stacks_tip_ch}/{cur_stacks_tip_bh} != {tip_block_ch}/{tip_block_bh}" ); return Err(NakamotoNodeError::StacksTipChanged); } @@ -1050,16 +1187,12 @@ impl RelayerThread { &StacksBlockId::new(&tip_block_ch, &tip_block_bh), ) .map_err(|e| { - warn!( - "Relayer: failed to load tip {}/{}: {:?}", - &tip_block_ch, &tip_block_bh, &e - ); + warn!("Relayer: failed to load tip {tip_block_ch}/{tip_block_bh}: {e:?}"); NakamotoNodeError::ParentNotFound })? 
.map(|header| header.stacks_block_height) else { warn!( - "Relayer: failed to load height for tip {}/{} (got None)", - &tip_block_ch, &tip_block_bh + "Relayer: failed to load height for tip {tip_block_ch}/{tip_block_bh} (got None)" ); return Err(NakamotoNodeError::ParentNotFound); }; @@ -1067,7 +1200,7 @@ impl RelayerThread { // sign and broadcast let mut op_signer = self.keychain.generate_op_signer(); let res = self.bitcoin_controller.submit_operation( - last_committed.get_epoch_id().clone(), + *last_committed.get_epoch_id(), BlockstackOperationType::LeaderBlockCommit(last_committed.get_block_commit().clone()), &mut op_signer, 1, @@ -1131,7 +1264,7 @@ impl RelayerThread { // load up canonical sortition and stacks tips let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()).map_err(|e| { - error!("Failed to load canonical sortition tip: {:?}", &e); + error!("Failed to load canonical sortition tip: {e:?}"); e }) else { @@ -1141,7 +1274,7 @@ impl RelayerThread { // NOTE: this may be an epoch2x tip let Ok((stacks_tip_ch, stacks_tip_bh)) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).map_err(|e| { - error!("Failed to load canonical stacks tip: {:?}", &e); + error!("Failed to load canonical stacks tip: {e:?}"); e }) else { @@ -1179,6 +1312,32 @@ impl RelayerThread { return None; } + if !highest_tenure_changed { + debug!("Relayer: burnchain view changed, but highest tenure did not"); + // The burnchain view changed, but the highest tenure did not, so + // wait a bit for the first block in the new tenure to arrive. This + // is to avoid submitting a block commit that will be immediately + // RBFed when the first block arrives. + if let Some(new_tenure_timeout) = self.new_tenure_timeout { + debug!( + "Relayer: {}s elapsed since burn block arrival", + new_tenure_timeout.elapsed().as_secs(), + ); + if new_tenure_timeout.elapsed() < self.config.miner.block_commit_delay { + return None; + } + } else { + info!( + "Relayer: starting new tenure timeout for {}s", + self.config.miner.block_commit_delay.as_secs() + ); + let timeout = Instant::now() + self.config.miner.block_commit_delay; + self.new_tenure_timeout = Some(Instant::now()); + self.next_initiative = timeout; + return None; + } + } + // burnchain view or highest-tenure view changed, so we need to send (or RBF) a commit Some(RelayerDirective::IssueBlockCommit( stacks_tip_ch, @@ -1198,7 +1357,7 @@ impl RelayerThread { while self.globals.keep_running() { let raised_initiative = self.globals.take_initiative(); let timed_out = Instant::now() >= self.next_initiative; - let directive = if raised_initiative.is_some() || timed_out { + let mut initiative_directive = if raised_initiative.is_some() || timed_out { self.next_initiative = Instant::now() + Duration::from_millis(self.config.node.next_initiative_delay); self.initiative() @@ -1206,13 +1365,17 @@ impl RelayerThread { None }; - let directive = if let Some(directive) = directive { + let directive = if let Some(directive) = initiative_directive.take() { directive } else { + // channel was drained, so do a time-bound recv match relay_rcv.recv_timeout(Duration::from_millis( self.config.node.next_initiative_delay, )) { - Ok(directive) => directive, + Ok(directive) => { + // only do this once, so we can call .initiative() again + directive + } Err(RecvTimeoutError::Timeout) => { continue; } @@ -1224,7 +1387,7 @@ impl RelayerThread { debug!("Relayer: main loop directive"; "directive" => %directive, - "raised_initiative" => 
%raised_initiative.unwrap_or("relay_rcv".to_string()), + "raised_initiative" => ?raised_initiative, "timed_out" => %timed_out); if !self.handle_directive(directive) { @@ -1246,25 +1409,19 @@ let mut f = match fs::File::open(path) { Ok(f) => f, Err(e) => { - warn!("Could not open {}: {:?}", &path, &e); + warn!("Could not open {path}: {e:?}"); return None; } }; let mut registered_key_bytes = vec![]; if let Err(e) = f.read_to_end(&mut registered_key_bytes) { - warn!( - "Failed to read registered key bytes from {}: {:?}", - path, &e - ); + warn!("Failed to read registered key bytes from {path}: {e:?}"); return None; } let Ok(registered_key) = serde_json::from_slice::<RegisteredKey>(&registered_key_bytes) else { - warn!( - "Did not load registered key from {}: could not decode JSON", - &path - ); + warn!("Did not load registered key from {path}: could not decode JSON"); return None; }; @@ -1274,7 +1431,7 @@ return None; } - info!("Loaded registered key from {}", &path); + info!("Loaded registered key from {path}"); Some(registered_key) } @@ -1299,7 +1456,7 @@ let mut saved_key_opt = None; if let Some(path) = self.config.miner.activated_vrf_key_path.as_ref() { saved_key_opt = - Self::load_saved_vrf_key(&path, &self.keychain.get_nakamoto_pkh()); + Self::load_saved_vrf_key(path, &self.keychain.get_nakamoto_pkh()); } if let Some(saved_key) = saved_key_opt { debug!("Relayer: resuming VRF key"); @@ -1371,9 +1528,9 @@ pub mod test { let pubkey_hash = Hash160::from_node_public_key(&pk); let path = "/tmp/does_not_exist.json"; - _ = std::fs::remove_file(&path); + _ = std::fs::remove_file(path); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); } @@ -1384,13 +1541,13 @@ let pubkey_hash = Hash160::from_node_public_key(&pk); let path = "/tmp/empty.json"; - File::create(&path).expect("Failed to create test file"); - assert!(Path::new(&path).exists()); + File::create(path).expect("Failed to create test file"); + assert!(Path::new(path).exists()); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); - std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete test file"); } #[test] @@ -1403,15 +1560,15 @@ let json_content = r#"{ "hello": "world" }"#; // Write the JSON content to the file - let mut file = File::create(&path).expect("Failed to create test file"); + let mut file = File::create(path).expect("Failed to create test file"); file.write_all(json_content.as_bytes()) .expect("Failed to write to test file"); - assert!(Path::new(&path).exists()); + assert!(Path::new(path).exists()); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); - std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete test file"); } #[test] @@ -1432,10 +1589,10 @@ let path = "/tmp/vrf_key.json"; save_activated_vrf_key(path, &key); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_some()); - std::fs::remove_file(&path).expect("Failed to delete
test file"); } #[test] @@ -1460,9 +1617,9 @@ pub mod test { let pk = Secp256k1PublicKey::from_private(keychain.get_nakamoto_sk()); let pubkey_hash = Hash160::from_node_public_key(&pk); - let res = RelayerThread::load_saved_vrf_key(&path, &pubkey_hash); + let res = RelayerThread::load_saved_vrf_key(path, &pubkey_hash); assert!(res.is_none()); - std::fs::remove_file(&path).expect("Failed to delete test file"); + std::fs::remove_file(path).expect("Failed to delete test file"); } } diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 697dddeb03..2b1efcbfc5 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -16,7 +16,7 @@ use std::collections::BTreeMap; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Receiver; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use std::time::Duration; use hashbrown::{HashMap, HashSet}; @@ -43,7 +43,7 @@ use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; use super::Error as NakamotoNodeError; -use crate::event_dispatcher::STACKER_DB_CHANNEL; +use crate::event_dispatcher::StackerDBChannel; use crate::neon::Counters; use crate::Config; @@ -68,11 +68,16 @@ pub struct SignCoordinator { total_weight: u32, keep_running: Arc, pub next_signer_bitvec: BitVec<4000>, + stackerdb_channel: Arc>, } impl Drop for SignCoordinator { fn drop(&mut self) { - STACKER_DB_CHANNEL.replace_receiver(self.receiver.take().expect( + let stackerdb_channel = self + .stackerdb_channel + .lock() + .expect("FATAL: failed to lock stackerdb channel"); + stackerdb_channel.replace_receiver(self.receiver.take().expect( "FATAL: lost possession of the StackerDB channel before dropping SignCoordinator", )); } @@ -87,11 +92,12 @@ impl SignCoordinator { message_key: StacksPrivateKey, config: &Config, keep_running: Arc, + stackerdb_channel: Arc>, ) -> Result { let is_mainnet = config.is_mainnet(); let Some(ref reward_set_signers) = reward_set.signers else { error!("Could not initialize signing coordinator for reward set without signer"); - debug!("reward set: {:?}", &reward_set); + debug!("reward set: {reward_set:?}"); return Err(ChainstateError::NoRegisteredSigners(0)); }; @@ -150,7 +156,10 @@ impl SignCoordinator { use crate::tests::nakamoto_integrations::TEST_SIGNING; if TEST_SIGNING.lock().unwrap().is_some() { debug!("Short-circuiting spinning up coordinator from signer commitments. Using test signers channel."); - let (receiver, replaced_other) = STACKER_DB_CHANNEL.register_miner_coordinator(); + let (receiver, replaced_other) = stackerdb_channel + .lock() + .expect("FATAL: failed to lock StackerDB channel") + .register_miner_coordinator(); if replaced_other { warn!("Replaced the miner/coordinator receiver of a prior thread. Prior thread may have crashed."); } @@ -164,12 +173,16 @@ impl SignCoordinator { weight_threshold: threshold, total_weight, keep_running, + stackerdb_channel, }; return Ok(sign_coordinator); } } - let (receiver, replaced_other) = STACKER_DB_CHANNEL.register_miner_coordinator(); + let (receiver, replaced_other) = stackerdb_channel + .lock() + .expect("FATAL: failed to lock StackerDB channel") + .register_miner_coordinator(); if replaced_other { warn!("Replaced the miner/coordinator receiver of a prior thread. 
Prior thread may have crashed."); } @@ -184,10 +197,12 @@ impl SignCoordinator { weight_threshold: threshold, total_weight, keep_running, + stackerdb_channel, }) } /// Send a message over the miners contract using a `StacksPrivateKey` + #[allow(clippy::too_many_arguments)] pub fn send_miners_message( miner_sk: &StacksPrivateKey, sortdb: &SortitionDB, @@ -199,7 +214,7 @@ impl SignCoordinator { miners_session: &mut StackerDBSession, election_sortition: &ConsensusHash, ) -> Result<(), String> { - let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &election_sortition) + let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, election_sortition) .map_err(|e| format!("Failed to read miner slot information: {e:?}"))? else { return Err("No slot for miner".into()); @@ -222,7 +237,7 @@ impl SignCoordinator { .saturating_add(1); let mut chunk = StackerDBChunkData::new(slot_id, slot_version, message.serialize_to_vec()); chunk - .sign(&miner_sk) + .sign(miner_sk) .map_err(|_| "Failed to sign StackerDB chunk")?; match miners_session.put_chunk(&chunk) { @@ -270,13 +285,14 @@ impl SignCoordinator { /// to the signers, and then waits for the signers to respond /// with their signatures. It does so in two ways, concurrently: /// * It waits for signer StackerDB messages with signatures. If enough signatures can be - /// found, then the block can be broadcast. + /// found, then the block can be broadcast. /// * It waits for the chainstate to contain the relayed block. If so, then its signatures are - /// loaded and returned. This can happen if the node receives the block via a signer who - /// fetched all signatures and assembled the signature vector, all before we could. + /// loaded and returned. This can happen if the node receives the block via a signer who + /// fetched all signatures and assembled the signature vector, all before we could. // Mutants skip here: this function is covered via integration tests, // which the mutation testing does not see. 
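`run_sign_v0`, whose definition follows the attributes below, drives this: it accumulates signer responses until either the accepting weight reaches the threshold or the rejecting weight makes success impossible, the same arithmetic as the `total_reject_weight.saturating_add(...)` check further down. A simplified model of that accounting (signer IDs and weights are stand-ins for the reward-set data):

    use std::collections::HashSet;

    /// Tally (signer_id, weight, accepted) votes against a weight threshold.
    fn tally(votes: &[(u32, u32, bool)], threshold: u32, total: u32) -> Option<bool> {
        let mut seen = HashSet::new();
        let (mut yes, mut no) = (0u32, 0u32);
        for &(id, weight, accepted) in votes {
            if !seen.insert(id) {
                continue; // count each signer once
            }
            if accepted {
                yes += weight;
            } else {
                no += weight;
            }
            if yes >= threshold {
                return Some(true); // enough weight to broadcast the block
            }
            if no.saturating_add(threshold) > total {
                return Some(false); // rejection can no longer be overcome
            }
        }
        None // keep waiting for more signer messages
    }

    fn main() {
        // A 70% threshold of a total weight of 10.
        assert_eq!(tally(&[(1, 4, true), (2, 3, true)], 7, 10), Some(true));
        assert_eq!(tally(&[(1, 4, false)], 7, 10), Some(false));
    }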
#[cfg_attr(test, mutants::skip)] + #[allow(clippy::too_many_arguments)] pub fn run_sign_v0( &mut self, block: &NakamotoBlock, @@ -306,7 +322,7 @@ impl SignCoordinator { &self.message_key, sortdb, burn_tip, - &stackerdbs, + stackerdbs, block_proposal_message, MinerSlotID::BlockProposal, self.is_mainnet, @@ -355,9 +371,8 @@ impl SignCoordinator { .get_nakamoto_block(&block.block_id()) .map_err(|e| { warn!( - "Failed to query chainstate for block {}: {:?}", - &block.block_id(), - &e + "Failed to query chainstate for block {}: {e:?}", + &block.block_id() ); e }) @@ -367,7 +382,7 @@ impl SignCoordinator { return Ok(stored_block.header.signer_signature); } - if Self::check_burn_tip_changed(&sortdb, &burn_tip) { + if Self::check_burn_tip_changed(sortdb, burn_tip) { debug!("SignCoordinator: Exiting due to new burnchain tip"); return Err(NakamotoNodeError::BurnchainTipChanged); } @@ -549,8 +564,7 @@ impl SignCoordinator { }; responded_signers.insert(rejected_pubkey); debug!( - "Signer {} rejected our block {}/{}", - slot_id, + "Signer {slot_id} rejected our block {}/{}", &block.header.consensus_hash, &block.header.block_hash() ); @@ -562,8 +576,7 @@ impl SignCoordinator { > self.total_weight { debug!( - "{}/{} signers vote to reject our block {}/{}", - total_reject_weight, + "{total_reject_weight}/{} signers vote to reject our block {}/{}", self.total_weight, &block.header.consensus_hash, &block.header.block_hash() diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index dcfa855c9b..b688db100d 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -62,79 +62,85 @@ /// [11] Notifies about new transaction attachment events /// [12] Signals VRF key registration /// -/// When the node is running, there are 4-5 active threads at once. They are: +/// When the node is running, there are 4-5 active threads at once. They are: /// -/// * **RunLoop Thread**: This is the main thread, whose code body lives in src/run_loop/neon.rs. -/// This thread is responsible for: -/// * Bootup -/// * Running the burnchain indexer -/// * Notifying the ChainsCoordinator thread when there are new burnchain blocks to process +/// * **RunLoop Thread**: +/// This is the main thread, whose code body lives in `src/run_loop/neon.rs`. +/// This thread is responsible for: +/// * Bootup +/// * Running the burnchain indexer +/// * Notifying the ChainsCoordinator thread when there are new burnchain blocks to process /// -/// * **Relayer Thread**: This is the thread that stores and relays blocks and microblocks. Both -/// it and the ChainsCoordinator thread are very I/O-heavy threads, and care has been taken to -/// ensure that neither one attempts to acquire a write-lock in the underlying databases. -/// Specifically, this thread directs the ChainsCoordinator thread when to process new Stacks -/// blocks, and it directs the miner thread (if running) to stop when either it or the -/// ChainsCoordinator thread needs to acquire the write-lock. 
-/// This thread is responsible for: -/// * Receiving new blocks and microblocks from the P2P thread via a shared channel -/// * (Sychronously) requesting the CoordinatorThread to process newly-stored Stacks blocks and -/// microblocks -/// * Building up the node's unconfirmed microblock stream state, and sharing it with the P2P -/// thread so it can answer queries about the unconfirmed microblock chain -/// * Pushing newly-discovered blocks and microblocks to the P2P thread for broadcast -/// * Registering the VRF public key for the miner -/// * Spawning the block and microblock miner threads, and stopping them if their continued -/// execution would inhibit block or microblock storage or processing. -/// * Submitting the burnchain operation to commit to a freshly-mined block +/// * **Relayer Thread**: +/// This is the thread that stores and relays blocks and microblocks. Both +/// it and the ChainsCoordinator thread are very I/O-heavy threads, and care has been taken to +/// ensure that neither one attempts to acquire a write-lock in the underlying databases. +/// Specifically, this thread directs the ChainsCoordinator thread when to process new Stacks +/// blocks, and it directs the miner thread (if running) to stop when either it or the +/// ChainsCoordinator thread needs to acquire the write-lock. +/// This thread is responsible for: +/// * Receiving new blocks and microblocks from the P2P thread via a shared channel +/// * (Synchronously) requesting the CoordinatorThread to process newly-stored Stacks blocks +/// and microblocks +/// * Building up the node's unconfirmed microblock stream state, and sharing it with the P2P +/// thread so it can answer queries about the unconfirmed microblock chain +/// * Pushing newly-discovered blocks and microblocks to the P2P thread for broadcast +/// * Registering the VRF public key for the miner +/// * Spawning the block and microblock miner threads, and stopping them if their continued +/// execution would inhibit block or microblock storage or processing. +/// * Submitting the burnchain operation to commit to a freshly-mined block /// -/// * **Miner thread**: This is the thread that actually produces new blocks and microblocks. It -/// is spawned only by the Relayer thread to carry out mining activity when the underlying -/// chainstate is not needed by either the Relayer or ChainsCoordinator threeads. -/// This thread does the following: -/// * Walk the mempool DB to build a new block or microblock -/// * Return the block or microblock to the Relayer thread +/// * **Miner Thread**: +/// This is the thread that actually produces new blocks and microblocks. It +/// is spawned only by the Relayer thread to carry out mining activity when the underlying +/// chainstate is not needed by either the Relayer or ChainsCoordinator threads. +/// This thread does the following: +/// * Walk the mempool DB to build a new block or microblock +/// * Return the block or microblock to the Relayer thread /// -/// * **P2P Thread**: This is the thread that communicates with the rest of the p2p network, and -/// handles RPC requests. It is meant to do as little storage-write I/O as possible to avoid lock -/// contention with the Miner, Relayer, and ChainsCoordinator threads. In particular, it forwards -/// data it receives from the p2p thread to the Relayer thread for I/O-bound processing. 
At the -/// time of this writing, it still requires holding a write-lock to handle some RPC request, but -/// future work will remove this so that this thread's execution will not interfere with the -/// others. This is the only thread that does socket I/O. -/// This thread runs the PeerNetwork state machines, which include the following: -/// * Learning the node's public IP address -/// * Discovering neighbor nodes -/// * Forwarding newly-discovered blocks, microblocks, and transactions from the Relayer thread to -/// other neighbors -/// * Synchronizing block and microblock inventory state with other neighbors -/// * Downloading blocks and microblocks, and passing them to the Relayer for storage and processing -/// * Downloading transaction attachments as their hashes are discovered during block processing -/// * Synchronizing the local mempool database with other neighbors -/// (notifications for new attachments come from a shared channel in the ChainsCoordinator thread) -/// * Handling HTTP requests +/// * **P2P Thread**: +/// This is the thread that communicates with the rest of the P2P network, and +/// handles RPC requests. It is meant to do as little storage-write I/O as possible to avoid lock +/// contention with the Miner, Relayer, and ChainsCoordinator threads. In particular, it forwards +/// data it receives from the P2P network to the Relayer thread for I/O-bound processing. At the +/// time of this writing, it still requires holding a write-lock to handle some RPC requests, but +/// future work will remove this so that this thread's execution will not interfere with the +/// others. This is the only thread that does socket I/O. +/// This thread runs the PeerNetwork state machines, which include the following: +/// * Learning the node's public IP address +/// * Discovering neighbor nodes +/// * Forwarding newly-discovered blocks, microblocks, and transactions from the Relayer thread +/// to other neighbors +/// * Synchronizing block and microblock inventory state with other neighbors +/// * Downloading blocks and microblocks, and passing them to the Relayer for storage and +/// processing +/// * Downloading transaction attachments as their hashes are discovered during block processing +/// * Synchronizing the local mempool database with other neighbors +/// (notifications for new attachments come from a shared channel in the ChainsCoordinator thread) +/// * Handling HTTP requests /// -/// * **ChainsCoordinator Thread**: This thread process sortitions and Stacks blocks and -/// microblocks, and handles PoX reorgs should they occur (this mainly happens in boot-up).
It, +/// like the Relayer thread, is a very I/O-heavy thread, and it will hold a write-lock on the +/// chainstate DBs while it works. Its actions are controlled by a CoordinatorComms structure in +/// the Globals shared state, which the Relayer thread and RunLoop thread both drive (the former +/// drives Stacks blocks processing, the latter sortitions). +/// This thread is responsible for: +/// * Responding to requests from other threads to process sortitions +/// * Responding to requests from other threads to process Stacks blocks and microblocks +/// * Processing PoX chain reorgs, should they ever happen +/// * Detecting attachment creation events, and informing the P2P thread of them so it can go +/// and download them /// /// In addition to the mempool and chainstate databases, these threads share access to a Globals -/// singleton that contains soft state shared between threads. Mainly, the Globals struct is meant -/// to store inter-thread shared singleton communication media all in one convenient struct. Each -/// thread has a handle to the struct's shared state handles. Global state includes: -/// * The global flag as to whether or not the miner thread can be running -/// * The global shutdown flag that, when set, causes all threads to terminate -/// * Sender channel endpoints that can be shared between threads -/// * Metrics about the node's behavior (e.g. number of blocks processed, etc.) +/// singleton that contains soft state shared between threads. Mainly, the Globals struct is meant +/// to store inter-thread shared singleton communication media all in one convenient struct. Each +/// thread has a handle to the struct's shared state handles. Global state includes: +/// * The global flag as to whether or not the miner thread can be running +/// * The global shutdown flag that, when set, causes all threads to terminate +/// * Sender channel endpoints that can be shared between threads +/// * Metrics about the node's behavior (e.g. number of blocks processed, etc.) /// /// This file may be refactored in the future into a full-fledged module. use std::cmp; @@ -182,7 +188,7 @@ use stacks::chainstate::stacks::{ TransactionAnchorMode, TransactionPayload, TransactionVersion, }; use stacks::core::mempool::MemPoolDB; -use stacks::core::{FIRST_BURNCHAIN_CONSENSUS_HASH, STACKS_EPOCH_3_0_MARKER}; +use stacks::core::{EpochList, FIRST_BURNCHAIN_CONSENSUS_HASH, STACKS_EPOCH_3_0_MARKER}; use stacks::cost_estimates::metrics::{CostMetric, UnitMetric}; use stacks::cost_estimates::{CostEstimator, FeeEstimator, UnitEstimator}; use stacks::monitoring::{increment_stx_blocks_mined_counter, update_active_miners_count_gauge}; @@ -230,6 +236,7 @@ pub const BLOCK_PROCESSOR_STACK_SIZE: usize = 32 * 1024 * 1024; // 32 MB type MinedBlocks = HashMap; /// Result of running the miner thread. It could produce a Stacks block or a microblock. 
+#[allow(clippy::large_enum_variant)] pub(crate) enum MinerThreadResult { Block( AssembledAnchorBlock, @@ -303,10 +310,7 @@ pub(crate) fn fault_injection_long_tenure() { error!("Parse error for STX_TEST_SLOW_TENURE"); panic!(); }; - info!( - "Fault injection: sleeping for {} milliseconds to simulate a long tenure", - tenure_time - ); + info!("Fault injection: sleeping for {tenure_time} milliseconds to simulate a long tenure"); stacks_common::util::sleep_ms(tenure_time); } @@ -571,10 +575,7 @@ impl MicroblockMinerThread { // This is an artifact of the way the MARF is built (see #1449) let sortdb = SortitionDB::open(&burn_db_path, true, burnchain.pox_constants) .map_err(|e| { - error!( - "Relayer: Could not open sortdb '{}' ({:?}); skipping tenure", - &burn_db_path, &e - ); + error!("Relayer: Could not open sortdb '{burn_db_path}' ({e:?}); skipping tenure"); e }) .ok()?; @@ -582,8 +583,7 @@ impl MicroblockMinerThread { let mut chainstate = open_chainstate_with_faults(&config) .map_err(|e| { error!( - "Relayer: Could not open chainstate '{}' ({:?}); skipping microblock tenure", - &stacks_chainstate_path, &e + "Relayer: Could not open chainstate '{stacks_chainstate_path}' ({e:?}); skipping microblock tenure" ); e }) @@ -605,10 +605,7 @@ impl MicroblockMinerThread { .. } = miner_tip; - debug!( - "Relayer: Instantiate microblock mining state off of {}/{}", - &ch, &bhh - ); + debug!("Relayer: Instantiate microblock mining state off of {ch}/{bhh}"); // we won a block! proceed to build a microblock tail if we've stored it match StacksChainState::get_anchored_block_header_info(chainstate.db(), &ch, &bhh) { @@ -646,8 +643,8 @@ impl MicroblockMinerThread { sortdb: Some(sortdb), mempool: Some(mempool), event_dispatcher: relayer_thread.event_dispatcher.clone(), - parent_consensus_hash: ch.clone(), - parent_block_hash: bhh.clone(), + parent_consensus_hash: ch, + parent_block_hash: bhh, miner_key, frequency, last_mined: 0, @@ -657,17 +654,11 @@ impl MicroblockMinerThread { }) } Ok(None) => { - warn!( - "Relayer: No such anchored block: {}/{}. Cannot mine microblocks", - ch, bhh - ); + warn!("Relayer: No such anchored block: {ch}/{bhh}. Cannot mine microblocks"); None } Err(e) => { - warn!( - "Relayer: Failed to get anchored block cost for {}/{}: {:?}", - ch, bhh, &e - ); + warn!("Relayer: Failed to get anchored block cost for {ch}/{bhh}: {e:?}"); None } } @@ -719,7 +710,7 @@ impl MicroblockMinerThread { let block_snapshot = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &self.parent_consensus_hash) .map_err(|e| { - error!("Failed to find block snapshot for mined block: {}", e); + error!("Failed to find block snapshot for mined block: {e}"); e })? .ok_or_else(|| { @@ -729,13 +720,13 @@ impl MicroblockMinerThread { let burn_height = block_snapshot.block_height; let ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), burn_height).map_err(|e| { - error!("Failed to get AST rules for microblock: {}", e); + error!("Failed to get AST rules for microblock: {e}"); e })?; let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), burn_height) .map_err(|e| { - error!("Failed to get epoch for microblock: {}", e); + error!("Failed to get epoch for microblock: {e}"); e })? 
.expect("FATAL: no epoch defined") @@ -743,7 +734,7 @@ impl MicroblockMinerThread { let mint_result = { let ic = sortdb.index_handle_at_block( - &chainstate, + chainstate, &block_snapshot.get_canonical_stacks_block_id(), )?; let mut microblock_miner = match StacksMicroblockBuilder::resume_unconfirmed( @@ -755,10 +746,10 @@ impl MicroblockMinerThread { Ok(x) => x, Err(e) => { let msg = format!( - "Failed to create a microblock miner at chaintip {}/{}: {:?}", - &self.parent_consensus_hash, &self.parent_block_hash, &e + "Failed to create a microblock miner at chaintip {}/{}: {e:?}", + &self.parent_consensus_hash, &self.parent_block_hash ); - error!("{}", msg); + error!("{msg}"); return Err(e); } }; @@ -787,7 +778,7 @@ impl MicroblockMinerThread { let (mined_microblock, new_cost) = match mint_result { Ok(x) => x, Err(e) => { - warn!("Failed to mine microblock: {}", e); + warn!("Failed to mine microblock: {e}"); return Err(e); } }; @@ -810,25 +801,25 @@ impl MicroblockMinerThread { use std::path::Path; if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { // record this microblock somewhere - if !fs::metadata(&path).is_ok() { + if fs::metadata(&path).is_err() { fs::create_dir_all(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path}'")); } let path = Path::new(&path); let path = path.join(Path::new(&format!("{}", &mined_microblock.block_hash()))); let mut file = fs::File::create(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{:?}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path:?}'")); let mblock_bits = mined_microblock.serialize_to_vec(); let mblock_bits_hex = to_hex(&mblock_bits); let mblock_json = format!( - r#"{{"microblock":"{}","parent_consensus":"{}","parent_block":"{}"}}"#, - &mblock_bits_hex, &self.parent_consensus_hash, &self.parent_block_hash + r#"{{"microblock":"{mblock_bits_hex}","parent_consensus":"{}","parent_block":"{}"}}"#, + &self.parent_consensus_hash, &self.parent_block_hash ); - file.write_all(&mblock_json.as_bytes()).unwrap_or_else(|_| { - panic!("FATAL: failed to write microblock bits to '{:?}'", &path) + file.write_all(mblock_json.as_bytes()).unwrap_or_else(|_| { + panic!("FATAL: failed to write microblock bits to '{path:?}'") }); info!( "Fault injection: bad microblock {} saved to {}", @@ -873,7 +864,7 @@ impl MicroblockMinerThread { // update unconfirmed state cost self.cost_so_far = new_cost; self.quantity += 1; - return Ok(mined_microblock); + Ok(mined_microblock) } /// Can this microblock miner mine off of this given tip? @@ -926,11 +917,11 @@ impl MicroblockMinerThread { info!("Will keep polling mempool for transactions to include in a microblock"); } Err(e) => { - warn!("Failed to mine one microblock: {:?}", &e); + warn!("Failed to mine one microblock: {e:?}"); } } } else { - debug!("Will not mine microblocks yet -- have {} attachable blocks that arrived in the last 10 minutes", num_attachable); + debug!("Will not mine microblocks yet -- have {num_attachable} attachable blocks that arrived in the last 10 minutes"); } self.last_mined = get_epoch_time_ms(); @@ -1086,6 +1077,7 @@ impl BlockMinerThread { } /// Constructs and returns a LeaderBlockCommitOp out of the provided params. 
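The fault-injection block above persists a bad microblock to `STACKS_BAD_BLOCKS_DIR`, and the hunk swaps `!fs::metadata(..).is_ok()` for the clearer `fs::metadata(..).is_err()`. A self-contained sketch of the same idiom; the file name and JSON shape here are illustrative, not the node's actual on-disk format:

```rust
use std::fs;
use std::io::Write;
use std::path::Path;

fn record_bad_artifact(hex_bits: &str, name: &str) -> std::io::Result<()> {
    // Only act when the operator opted in via the environment.
    let Ok(dir) = std::env::var("STACKS_BAD_BLOCKS_DIR") else {
        return Ok(());
    };
    // `is_err()` reads better than `!(...).is_ok()`, per clippy.
    if fs::metadata(&dir).is_err() {
        fs::create_dir_all(&dir)?;
    }
    let path = Path::new(&dir).join(name);
    let json = format!(r#"{{"artifact":"{hex_bits}"}}"#); // illustrative shape
    fs::File::create(&path)?.write_all(json.as_bytes())
}

fn main() -> std::io::Result<()> {
    // With STACKS_BAD_BLOCKS_DIR unset this is a no-op, mirroring the node.
    record_bad_artifact("deadbeef", "example-block-hash")
}
```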
+ #[allow(clippy::too_many_arguments)] fn inner_generate_block_commit_op( &self, block_header_hash: BlockHeaderHash, @@ -1202,7 +1194,7 @@ impl BlockMinerThread { .expect("FATAL: could not query chain tips") }; - if stacks_tips.len() == 0 { + if stacks_tips.is_empty() { return vec![]; } @@ -1213,7 +1205,7 @@ impl BlockMinerThread { .filter(|candidate| Self::is_on_canonical_burnchain_fork(candidate, &sortdb_tip_handle)) .collect(); - if stacks_tips.len() == 0 { + if stacks_tips.is_empty() { return vec![]; } @@ -1269,7 +1261,7 @@ impl BlockMinerThread { pub(crate) fn sort_and_populate_candidates( mut candidates: Vec, ) -> Vec { - if candidates.len() == 0 { + if candidates.is_empty() { return candidates; } candidates.sort_by(|tip1, tip2| { @@ -1373,7 +1365,7 @@ impl BlockMinerThread { // identify leaf tips -- i.e. blocks with no children let parent_consensus_hashes: HashSet<_> = stacks_tips .iter() - .map(|x| x.parent_consensus_hash.clone()) + .map(|x| x.parent_consensus_hash) .collect(); let mut leaf_tips: Vec<_> = stacks_tips @@ -1381,7 +1373,7 @@ impl BlockMinerThread { .filter(|x| !parent_consensus_hashes.contains(&x.consensus_hash)) .collect(); - if leaf_tips.len() == 0 { + if leaf_tips.is_empty() { return None; } @@ -1427,8 +1419,7 @@ impl BlockMinerThread { { // This leaf does not confirm a previous-best-tip, so assign it the // worst-possible score. - info!("Tip #{} {}/{} at {}:{} conflicts with a previous best-tip {}/{} at {}:{}", - i, + info!("Tip #{i} {}/{} at {}:{} conflicts with a previous best-tip {}/{} at {}:{}", &leaf_tip.consensus_hash, &leaf_tip.anchored_block_hash, leaf_tip.burn_height, @@ -1488,13 +1479,11 @@ impl BlockMinerThread { } info!( - "Tip #{} {}/{} at {}:{} has score {} ({})", - i, + "Tip #{i} {}/{} at {}:{} has score {score} ({})", &leaf_tip.consensus_hash, &leaf_tip.anchored_block_hash, leaf_tip.burn_height, leaf_tip.stacks_height, - score, score_summaries.join(" + ").to_string() ); if score < u64::MAX { @@ -1502,7 +1491,7 @@ impl BlockMinerThread { } } - if scores.len() == 0 { + if scores.is_empty() { // revert to prior tie-breaking scheme return None; } @@ -1519,8 +1508,8 @@ impl BlockMinerThread { .expect("FATAL: candidates should not be empty"); info!( - "Best tip is #{} {}/{}", - best_tip_idx, &best_tip.consensus_hash, &best_tip.anchored_block_hash + "Best tip is #{best_tip_idx} {}/{}", + &best_tip.consensus_hash, &best_tip.anchored_block_hash ); Some((*best_tip).clone()) } @@ -1576,14 +1565,14 @@ impl BlockMinerThread { let chain_tip = ChainTip::genesis( &burnchain_params.first_block_hash, - burnchain_params.first_block_height.into(), + burnchain_params.first_block_height, burnchain_params.first_block_timestamp.into(), ); ( Some(ParentStacksBlockInfo { stacks_parent_header: chain_tip.metadata, - parent_consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + parent_consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH, parent_block_burn_height: 0, parent_block_total_burn: 0, parent_winning_vtxindex: 0, @@ -1671,7 +1660,7 @@ impl BlockMinerThread { { if (prev_block.anchored_block.header.parent_microblock == BlockHeaderHash([0u8; 32]) - && stream.len() == 0) + && stream.is_empty()) || (prev_block.anchored_block.header.parent_microblock != BlockHeaderHash([0u8; 32]) && stream.len() @@ -1682,9 +1671,9 @@ impl BlockMinerThread { if !force { // the chain tip hasn't changed since we attempted to build a block. Use what we // already have. 
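Several hunks here replace `len() == 0` with `is_empty()` (`clippy::len_zero`). The two forms are equivalent; the second states intent directly:

```rust
fn main() {
    let stacks_tips: Vec<u64> = Vec::new();

    // Before: comparing the length against zero.
    if stacks_tips.len() == 0 {
        println!("no tips (len == 0 form)");
    }

    // After: the intent-revealing form clippy::len_zero suggests.
    if stacks_tips.is_empty() {
        println!("no tips (is_empty form)");
    }
}
```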
- info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no new microblocks ({} <= {} + 1)", + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {parent_block_burn_height}, and no new microblocks ({} <= {} + 1)", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + prev_block.anchored_block.txs.len(), prev_block.burn_hash, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); return None; } @@ -1693,36 +1682,32 @@ impl BlockMinerThread { // TODO: only consider rebuilding our anchored block if we (a) have // time, and (b) the new microblocks are worth more than the new BTC // fee minus the old BTC fee - info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, but there are new microblocks ({} > {} + 1)", + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {parent_block_burn_height}, but there are new microblocks ({} > {} + 1)", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + prev_block.anchored_block.txs.len(), prev_block.burn_hash, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); best_attempt = cmp::max(best_attempt, prev_block.attempt); } - } else { - if !force { - // no microblock stream to confirm, and the stacks tip hasn't changed - info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no microblocks present", - &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height); + } else if !force { + // no microblock stream to confirm, and the stacks tip hasn't changed + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {parent_block_burn_height}, and no microblocks present", + &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, + prev_block.anchored_block.txs.len(), prev_block.burn_hash); - return None; - } + return None; } + } else if self.burn_block.burn_header_hash == prev_block.burn_hash { + // only try and re-mine if there was no sortition since the last chain tip + info!("Relayer: Stacks tip has changed to {parent_consensus_hash}/{} since we last tried to mine a block in {} at burn height {parent_block_burn_height}; attempt was {} (for Stacks tip {}/{})", + stacks_parent_header.anchored_header.block_hash(), prev_block.burn_hash, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); + best_attempt = cmp::max(best_attempt, prev_block.attempt); + // Since the 
chain tip has changed, we should try to mine a new block, even + // if it has less transactions than the previous block we mined, since that + // previous block would now be a reorg. + max_txs = 0; } else { - if self.burn_block.burn_header_hash == prev_block.burn_hash { - // only try and re-mine if there was no sortition since the last chain tip - info!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", - parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); - best_attempt = cmp::max(best_attempt, prev_block.attempt); - // Since the chain tip has changed, we should try to mine a new block, even - // if it has less transactions than the previous block we mined, since that - // previous block would now be a reorg. - max_txs = 0; - } else { - info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", - &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); - } + info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", + &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); } } (best_attempt + 1, max_txs) @@ -1818,9 +1803,7 @@ impl BlockMinerThread { Ok(x) => { let num_mblocks = x.as_ref().map(|(mblocks, ..)| mblocks.len()).unwrap_or(0); debug!( - "Loaded {} microblocks descending from {}/{} (data: {})", - num_mblocks, - parent_consensus_hash, + "Loaded {num_mblocks} microblocks descending from {parent_consensus_hash}/{} (data: {})", &stacks_parent_header.anchored_header.block_hash(), x.is_some() ); @@ -1828,17 +1811,15 @@ } Err(e) => { warn!( - "Failed to load descendant microblock stream from {}/{}: {:?}", - parent_consensus_hash, - &stacks_parent_header.anchored_header.block_hash(), - &e + "Failed to load descendant microblock stream from {parent_consensus_hash}/{}: {e:?}", + &stacks_parent_header.anchored_header.block_hash() ); None } }; if let Some((ref microblocks, ref poison_opt)) = &microblock_info_opt { - if let Some(ref tail) = microblocks.last() { + if let Some(tail) = microblocks.last() { debug!( "Confirm microblock stream tailed at {} (seq {})", &tail.block_hash(), @@ -1848,11 +1829,10 @@ // try and confirm as many microblocks as we can (but note that the stream itself may // be too long; we'll try again if that happens).
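The re-mining hunk above also flattens `else { if cond { .. } else { .. } }` into `else if` (`clippy::collapsible_else_if`). The shape, reduced to a sketch with made-up predicates:

```rust
fn classify(tip_changed: bool, burn_tip_changed: bool) -> &'static str {
    if !tip_changed {
        "stacks tip unchanged"
    } else if !burn_tip_changed {
        // Previously this branch was nested one level deeper:
        // `else { if !burn_tip_changed { ... } else { ... } }`
        "stacks tip changed, same sortition: re-mine"
    } else {
        "burn tip changed: drop the attempt"
    }
}

fn main() {
    println!("{}", classify(true, false));
}
```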
- stacks_parent_header.microblock_tail = - microblocks.last().clone().map(|blk| blk.header.clone()); + stacks_parent_header.microblock_tail = microblocks.last().map(|blk| blk.header.clone()); if let Some(poison_payload) = poison_opt { - debug!("Detected poisoned microblock fork: {:?}", &poison_payload); + debug!("Detected poisoned microblock fork: {poison_payload:?}"); // submit it multiple times with different nonces, so it'll have a good chance of // eventually getting picked up (even if the miner sends other transactions from @@ -1868,21 +1848,15 @@ impl BlockMinerThread { if let Err(e) = mem_pool.miner_submit( chain_state, sortdb, - &parent_consensus_hash, + parent_consensus_hash, &stacks_parent_header.anchored_header.block_hash(), &poison_microblock_tx, Some(&self.event_dispatcher), 1_000_000_000.0, // prioritize this for inclusion ) { - warn!( - "Detected but failed to mine poison-microblock transaction: {:?}", - &e - ); + warn!("Detected but failed to mine poison-microblock transaction: {e:?}"); } else { - debug!( - "Submit poison-microblock transaction {:?}", - &poison_microblock_tx - ); + debug!("Submit poison-microblock transaction {poison_microblock_tx:?}"); } } } @@ -1915,11 +1889,12 @@ impl BlockMinerThread { } btc_addrs .into_iter() - .map(|addr| format!("{}", &addr)) + .map(|addr| format!("{addr}")) .collect() } /// Obtain the target burn fee cap, when considering how well this miner is performing. + #[allow(clippy::too_many_arguments)] pub fn get_mining_spend_amount( config: &Config, keychain: &Keychain, @@ -1947,7 +1922,7 @@ impl BlockMinerThread { }; let Ok(tip) = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).map_err(|e| { - warn!("Failed to load canonical burn chain tip: {:?}", &e); + warn!("Failed to load canonical burn chain tip: {e:?}"); e }) else { return config_file_burn_fee_cap; @@ -1955,10 +1930,7 @@ impl BlockMinerThread { let tip = if let Some(at_burn_block) = at_burn_block.as_ref() { let ih = sortdb.index_handle(&tip.sortition_id); let Ok(Some(ancestor_tip)) = ih.get_block_snapshot_by_height(*at_burn_block) else { - warn!( - "Failed to load ancestor tip at burn height {}", - at_burn_block - ); + warn!("Failed to load ancestor tip at burn height {at_burn_block}"); return config_file_burn_fee_cap; }; ancestor_tip @@ -1968,13 +1940,13 @@ impl BlockMinerThread { let Ok(active_miners_and_commits) = MinerStats::get_active_miners(sortdb, at_burn_block) .map_err(|e| { - warn!("Failed to get active miners: {:?}", &e); + warn!("Failed to get active miners: {e:?}"); e }) else { return config_file_burn_fee_cap; }; - if active_miners_and_commits.len() == 0 { + if active_miners_and_commits.is_empty() { warn!("No active miners detected; using config file burn_fee_cap"); return config_file_burn_fee_cap; } @@ -1984,12 +1956,12 @@ impl BlockMinerThread { .map(|(miner, _cmt)| miner.as_str()) .collect(); - info!("Active miners: {:?}", &active_miners); + info!("Active miners: {active_miners:?}"); let Ok(unconfirmed_block_commits) = miner_stats .get_unconfirmed_commits(tip.block_height + 1, &active_miners) .map_err(|e| { - warn!("Failed to find unconfirmed block-commits: {}", &e); + warn!("Failed to find unconfirmed block-commits: {e}"); e }) else { @@ -2001,24 +1973,20 @@ impl BlockMinerThread { .map(|cmt| (cmt.apparent_sender.to_string(), cmt.burn_fee)) .collect(); - info!( - "Found unconfirmed block-commits: {:?}", - &unconfirmed_miners_and_amounts - ); + info!("Found unconfirmed block-commits: {unconfirmed_miners_and_amounts:?}"); let (spend_dist, _total_spend) = 
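The one-line change above maps `microblocks.last()` straight into an owned header instead of cloning the `Option<&Microblock>` first. The same shape with stand-in types:

```rust
#[derive(Clone, Debug)]
struct Header {
    sequence: u16,
}

#[derive(Clone, Debug)]
struct Microblock {
    header: Header,
}

fn main() {
    let microblocks = vec![
        Microblock { header: Header { sequence: 0 } },
        Microblock { header: Header { sequence: 1 } },
    ];

    // `.last()` yields Option<&Microblock>; map it straight into the owned
    // header rather than cloning the Option first.
    let tail: Option<Header> = microblocks.last().map(|blk| blk.header.clone());
    println!("{tail:?}");
}
```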
MinerStats::get_spend_distribution( &active_miners_and_commits, &unconfirmed_block_commits, - &recipients, + recipients, ); let win_probs = if miner_config.fast_rampup { // look at spends 6+ blocks in the future - let win_probs = MinerStats::get_future_win_distribution( + MinerStats::get_future_win_distribution( &active_miners_and_commits, &unconfirmed_block_commits, - &recipients, - ); - win_probs + recipients, + ) } else { // look at the current spends let Ok(unconfirmed_burn_dist) = miner_stats @@ -2031,21 +1999,20 @@ impl BlockMinerThread { at_burn_block, ) .map_err(|e| { - warn!("Failed to get unconfirmed burn distribution: {:?}", &e); + warn!("Failed to get unconfirmed burn distribution: {e:?}"); e }) else { return config_file_burn_fee_cap; }; - let win_probs = MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist); - win_probs + MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist) }; - info!("Unconfirmed spend distribution: {:?}", &spend_dist); + info!("Unconfirmed spend distribution: {spend_dist:?}"); info!( - "Unconfirmed win probabilities (fast_rampup={}): {:?}", - miner_config.fast_rampup, &win_probs + "Unconfirmed win probabilities (fast_rampup={}): {win_probs:?}", + miner_config.fast_rampup ); let miner_addrs = Self::get_miner_addrs(config, keychain); @@ -2056,8 +2023,8 @@ impl BlockMinerThread { .unwrap_or(0.0); info!( - "This miner's win probability at {} is {}", - tip.block_height, &win_prob + "This miner's win probability at {} is {win_prob}", + tip.block_height ); set_prior_winning_prob(tip.block_height, win_prob); @@ -2080,8 +2047,7 @@ impl BlockMinerThread { let prior_win_prob = get_prior_winning_prob(prior_burn_height); if prior_win_prob < config.miner.target_win_probability { info!( - "Miner underperformed in block {} ({}/{})", - prior_burn_height, underperformed_count, underperform_stop_threshold + "Miner underperformed in block {prior_burn_height} ({underperformed_count}/{underperform_stop_threshold})" ); underperformed_count += 1; } @@ -2102,6 +2068,7 @@ impl BlockMinerThread { /// Produce the block-commit for this anchored block, if we can. /// Returns the op on success /// Returns None if we fail somehow. 
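The `win_probs` hunks above drop the `let x = ...; x` pattern (`clippy::let_and_return`) and evaluate to the expression directly. A toy helper in that style; the probability math here is illustrative only, not the node's estimator:

```rust
fn win_probability(spends: &[u64]) -> f64 {
    let total: u64 = spends.iter().sum();
    if total == 0 {
        return 0.0;
    }
    // clippy::let_and_return: return the expression itself instead of
    // binding `let win_probs = ...;` only to name it on the next line.
    spends.first().map(|s| *s as f64 / total as f64).unwrap_or(0.0)
}

fn main() {
    println!("{}", win_probability(&[10, 30, 60]));
}
```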
+ #[allow(clippy::too_many_arguments)] pub fn make_block_commit( &self, burn_db: &mut SortitionDB, @@ -2123,7 +2090,7 @@ impl BlockMinerThread { ) { Ok(x) => x, Err(e) => { - error!("Relayer: Failure fetching recipient set: {:?}", e); + error!("Relayer: Failure fetching recipient set: {e:?}"); return None; } }; @@ -2227,12 +2194,10 @@ impl BlockMinerThread { if let Some(highest_unprocessed_block_sn) = highest_unprocessed_block_sn_opt { if stacks_tip.anchored_header.height() + u64::from(burnchain.pox_constants.prepare_length) - - 1 - >= highest_unprocessed.height + > highest_unprocessed.height && highest_unprocessed_block_sn.block_height + u64::from(burnchain.pox_constants.prepare_length) - - 1 - >= sort_tip.block_height + > sort_tip.block_height { // we're close enough to the chain tip that it's a bad idea for us to mine // -- we'll likely create an orphan @@ -2243,7 +2208,7 @@ impl BlockMinerThread { } } // we can mine - return false; + false } /// Only used in mock signing to generate a peer info view @@ -2301,16 +2266,14 @@ impl BlockMinerThread { // Just wait a min amount of time for the mock signatures to come in while mock_signatures.len() < slot_ids.len() && mock_poll_start.elapsed() < timeout { let chunks = stackerdbs.get_latest_chunks(&signers_contract_id, &slot_ids)?; - for chunk in chunks { - if let Some(chunk) = chunk { - if let Ok(SignerMessage::MockSignature(mock_signature)) = - SignerMessage::consensus_deserialize(&mut chunk.as_slice()) + for chunk in chunks.into_iter().flatten() { + if let Ok(SignerMessage::MockSignature(mock_signature)) = + SignerMessage::consensus_deserialize(&mut chunk.as_slice()) + { + if mock_signature.mock_proposal == *mock_proposal + && !mock_signatures.contains(&mock_signature) { - if mock_signature.mock_proposal == *mock_proposal - && !mock_signatures.contains(&mock_signature) - { - mock_signatures.push(mock_signature); - } + mock_signatures.push(mock_signature); } } } @@ -2325,19 +2288,17 @@ impl BlockMinerThread { StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id); let miner_slot_ids: Vec<_> = (0..MINER_SLOT_COUNT * 2).collect(); if let Ok(messages) = miners_stackerdb.get_latest_chunks(&miner_slot_ids) { - for message in messages { - if let Some(message) = message { - if message.is_empty() { - continue; - } - let Ok(SignerMessage::MockBlock(mock_block)) = - SignerMessage::consensus_deserialize(&mut message.as_slice()) - else { - continue; - }; - if mock_block.mock_proposal.peer_info == *peer_info { - return true; - } + for message in messages.into_iter().flatten() { + if message.is_empty() { + continue; + } + let Ok(SignerMessage::MockBlock(mock_block)) = + SignerMessage::consensus_deserialize(&mut message.as_slice()) + else { + continue; + }; + if mock_block.mock_proposal.peer_info == *peer_info { + return true; } } } @@ -2536,10 +2497,7 @@ impl BlockMinerThread { if cfg!(test) { if let Ok(mblock_pubkey_hash_str) = std::env::var("STACKS_MICROBLOCK_PUBKEY_HASH") { if let Ok(bad_pubkh) = Hash160::from_hex(&mblock_pubkey_hash_str) { - debug!( - "Fault injection: set microblock public key hash to {}", - &bad_pubkh - ); + debug!("Fault injection: set microblock public key hash to {bad_pubkh}"); pubkh = bad_pubkh } } @@ -2624,13 +2582,13 @@ impl BlockMinerThread { ) { Ok(block) => block, Err(e) => { - error!("Relayer: Failure mining anchor block even after removing offending microblock {}: {}", &mblock_header_hash, &e); + error!("Relayer: Failure mining anchor block even after removing offending microblock {mblock_header_hash}: 
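The mock-signature and mock-block loops above collapse `for x in xs { if let Some(x) = x { .. } }` into `for x in xs.into_iter().flatten()`, removing one level of nesting. A runnable miniature:

```rust
fn main() {
    // StackerDB-style result: one Option per slot queried.
    let chunks: Vec<Option<Vec<u8>>> = vec![Some(vec![1, 2]), None, Some(vec![])];

    // `flatten()` on an iterator of Options skips the `None`s, so the body
    // only ever sees present chunks.
    for chunk in chunks.into_iter().flatten() {
        if chunk.is_empty() {
            continue; // mirror the "skip empty messages" guard above
        }
        println!("got {} bytes", chunk.len());
    }
}
```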
{e}"); return None; } } } Err(e) => { - error!("Relayer: Failure mining anchored block: {}", e); + error!("Relayer: Failure mining anchored block: {e}"); return None; } }; @@ -2649,12 +2607,12 @@ impl BlockMinerThread { if miner_config.only_increase_tx_count && max_txs > u64::try_from(anchored_block.txs.len()).expect("too many txs") { - info!("Relayer: Succeeded assembling subsequent block with {} txs, but had previously produced a block with {} txs", anchored_block.txs.len(), max_txs); + info!("Relayer: Succeeded assembling subsequent block with {} txs, but had previously produced a block with {max_txs} txs", anchored_block.txs.len()); return None; } info!( - "Relayer: Succeeded assembling {} block #{}: {}, with {} txs, attempt {}", + "Relayer: Succeeded assembling {} block #{}: {}, with {} txs, attempt {attempt}", if parent_block_info.parent_block_total_burn == 0 { "Genesis" } else { @@ -2662,8 +2620,7 @@ impl BlockMinerThread { }, anchored_block.header.total_work.work, anchored_block.block_hash(), - anchored_block.txs.len(), - attempt + anchored_block.txs.len() ); // let's commit @@ -2780,7 +2737,7 @@ impl BlockMinerThread { return None; } Err(e) => { - warn!("Relayer: Failed to submit Bitcoin transaction: {:?}", e); + warn!("Relayer: Failed to submit Bitcoin transaction: {e:?}"); self.failed_to_submit_last_attempt = true; return None; } @@ -2941,9 +2898,7 @@ impl RelayerThread { /// chain tip? pub fn has_waited_for_latest_blocks(&self) -> bool { // a network download pass took place - (self.min_network_download_passes <= self.last_network_download_passes - // a network inv pass took place - && self.min_network_download_passes <= self.last_network_download_passes) + self.min_network_download_passes <= self.last_network_download_passes // we waited long enough for a download pass, but timed out waiting || self.last_network_block_height_ts + (self.config.node.wait_time_for_blocks as u128) < get_epoch_time_ms() // we're not supposed to wait at all @@ -3022,7 +2977,7 @@ impl RelayerThread { net_receipts.processed_unconfirmed_state.receipts.len(); if num_unconfirmed_microblock_tx_receipts > 0 { if let Some(unconfirmed_state) = self.chainstate_ref().unconfirmed_state.as_ref() { - let canonical_tip = unconfirmed_state.confirmed_chain_tip.clone(); + let canonical_tip = unconfirmed_state.confirmed_chain_tip; self.event_dispatcher.process_new_microblocks( canonical_tip, net_receipts.processed_unconfirmed_state, @@ -3076,7 +3031,7 @@ impl RelayerThread { let burn_height = SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), consensus_hash) .map_err(|e| { - error!("Failed to find block snapshot for mined block: {}", e); + error!("Failed to find block snapshot for mined block: {e}"); e })? .ok_or_else(|| { @@ -3094,7 +3049,7 @@ impl RelayerThread { if !Relayer::static_check_problematic_relayed_block( self.chainstate_ref().mainnet, epoch_id, - &anchored_block, + anchored_block, ASTRules::PrecheckSize, ) { // nope! 
@@ -3107,24 +3062,22 @@ impl RelayerThread { use std::path::Path; if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { // record this block somewhere - if !fs::metadata(&path).is_ok() { + if fs::metadata(&path).is_err() { fs::create_dir_all(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path}'")); } let path = Path::new(&path); let path = path.join(Path::new(&format!("{}", &anchored_block.block_hash()))); let mut file = fs::File::create(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{:?}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path:?}'")); let block_bits = anchored_block.serialize_to_vec(); let block_bits_hex = to_hex(&block_bits); - let block_json = format!( - r#"{{"block":"{}","consensus":"{}"}}"#, - &block_bits_hex, &consensus_hash - ); - file.write_all(&block_json.as_bytes()).unwrap_or_else(|_| { - panic!("FATAL: failed to write block bits to '{:?}'", &path) + let block_json = + format!(r#"{{"block":"{block_bits_hex}","consensus":"{consensus_hash}"}}"#); + file.write_all(block_json.as_bytes()).unwrap_or_else(|_| { + panic!("FATAL: failed to write block bits to '{path:?}'") }); info!( "Fault injection: bad block {} saved to {}", @@ -3154,8 +3107,8 @@ impl RelayerThread { chainstate.preprocess_anchored_block( &ic, consensus_hash, - &anchored_block, - &parent_consensus_hash, + anchored_block, + parent_consensus_hash, 0, ) })?; @@ -3234,8 +3187,8 @@ impl RelayerThread { .expect("FATAL: unknown consensus hash"); debug!( - "Relayer: Process tenure {}/{} in {} burn height {}", - &consensus_hash, &block_header_hash, &burn_hash, sn.block_height + "Relayer: Process tenure {consensus_hash}/{block_header_hash} in {burn_hash} burn height {}", + sn.block_height ); if let Some((last_mined_block_data, microblock_privkey)) = @@ -3252,8 +3205,7 @@ impl RelayerThread { let reward_block_height = mined_block.header.total_work.work + MINER_REWARD_MATURITY; info!( - "Relayer: Won sortition! Mining reward will be received in {} blocks (block #{})", - MINER_REWARD_MATURITY, reward_block_height + "Relayer: Won sortition! 
Mining reward will be received in {MINER_REWARD_MATURITY} blocks (block #{reward_block_height})" ); debug!("Relayer: Won sortition!"; "stacks_header" => %block_header_hash, @@ -3272,7 +3224,7 @@ impl RelayerThread { return (false, None); } Err(e) => { - warn!("Error processing my tenure, bad block produced: {}", e); + warn!("Error processing my tenure, bad block produced: {e}"); warn!( "Bad block"; "stacks_header" => %block_header_hash, @@ -3283,20 +3235,18 @@ impl RelayerThread { }; // advertize _and_ push blocks for now - let blocks_available = Relayer::load_blocks_available_data( - self.sortdb_ref(), - vec![consensus_hash.clone()], - ) - .expect("Failed to obtain block information for a block we mined."); + let blocks_available = + Relayer::load_blocks_available_data(self.sortdb_ref(), vec![consensus_hash]) + .expect("Failed to obtain block information for a block we mined."); let block_data = { let mut bd = HashMap::new(); - bd.insert(consensus_hash.clone(), mined_block.clone()); + bd.insert(consensus_hash, mined_block.clone()); bd }; if let Err(e) = self.relayer.advertize_blocks(blocks_available, block_data) { - warn!("Failed to advertise new block: {}", e); + warn!("Failed to advertise new block: {e}"); } let snapshot = SortitionDB::get_block_snapshot_consensus( @@ -3308,13 +3258,12 @@ impl RelayerThread { if !snapshot.pox_valid { warn!( - "Snapshot for {} is no longer valid; discarding {}...", - &consensus_hash, + "Snapshot for {consensus_hash} is no longer valid; discarding {}...", &mined_block.block_hash() ); miner_tip = Self::pick_higher_tip(miner_tip, None); } else { - let ch = snapshot.consensus_hash.clone(); + let ch = snapshot.consensus_hash; let bh = mined_block.block_hash(); let height = mined_block.header.total_work.work; @@ -3332,7 +3281,7 @@ impl RelayerThread { .relayer .broadcast_block(snapshot.consensus_hash, mined_block) { - warn!("Failed to push new block: {}", e); + warn!("Failed to push new block: {e}"); } } @@ -3355,8 +3304,7 @@ impl RelayerThread { } } else { debug!( - "Relayer: Did not win sortition in {}, winning block was {}/{}", - &burn_hash, &consensus_hash, &block_header_hash + "Relayer: Did not win sortition in {burn_hash}, winning block was {consensus_hash}/{block_header_hash}" ); miner_tip = None; } @@ -3391,7 +3339,7 @@ impl RelayerThread { let tenures = if let Some(last_ch) = self.last_tenure_consensus_hash.as_ref() { let mut tenures = vec![]; let last_sn = - SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), &last_ch) + SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), last_ch) .expect("FATAL: failed to query sortition DB") .expect("FATAL: unknown prior consensus hash"); @@ -3470,8 +3418,7 @@ impl RelayerThread { .expect("FATAL: failed to query sortition DB") .expect("FATAL: no snapshot for consensus hash"); - let old_last_mined_blocks = - mem::replace(&mut self.last_mined_blocks, MinedBlocks::new()); + let old_last_mined_blocks = mem::take(&mut self.last_mined_blocks); self.last_mined_blocks = Self::clear_stale_mined_blocks(this_burn_tip.block_height, old_last_mined_blocks); @@ -3492,11 +3439,9 @@ impl RelayerThread { || mtip.block_hash != stacks_tip_block_hash { debug!( - "Relayer: miner tip {}/{} is NOT canonical ({}/{})", + "Relayer: miner tip {}/{} is NOT canonical ({stacks_tip_consensus_hash}/{stacks_tip_block_hash})", &mtip.consensus_hash, &mtip.block_hash, - &stacks_tip_consensus_hash, - &stacks_tip_block_hash ); miner_tip = None; } else { @@ -3553,14 +3498,11 @@ impl RelayerThread { /// cost since we won't 
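Above, `mem::replace(&mut self.last_mined_blocks, MinedBlocks::new())` becomes `mem::take(...)`, which swaps in `Default::default()` for you. A demonstration:

```rust
use std::collections::HashMap;
use std::mem;

fn main() {
    let mut last_mined_blocks: HashMap<u64, &'static str> = HashMap::new();
    last_mined_blocks.insert(100, "block-a");

    // Equivalent to mem::replace(&mut map, HashMap::new()), but shorter and
    // unambiguous about which "empty" value gets swapped in.
    let old = mem::take(&mut last_mined_blocks);

    assert!(last_mined_blocks.is_empty());
    println!("retired {} in-flight block(s)", old.len());
}
```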
be mining it anymore. fn setup_microblock_mining_state(&mut self, new_miner_tip: Option) { // update state - let my_miner_tip = std::mem::replace(&mut self.miner_tip, None); + let my_miner_tip = std::mem::take(&mut self.miner_tip); let best_tip = Self::pick_higher_tip(my_miner_tip.clone(), new_miner_tip.clone()); if best_tip == new_miner_tip && best_tip != my_miner_tip { // tip has changed - debug!( - "Relayer: Best miner tip went from {:?} to {:?}", - &my_miner_tip, &new_miner_tip - ); + debug!("Relayer: Best miner tip went from {my_miner_tip:?} to {new_miner_tip:?}"); self.microblock_stream_cost = ExecutionCost::zero(); } self.miner_tip = best_tip; @@ -3597,7 +3539,7 @@ impl RelayerThread { BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { public_key: vrf_public_key, memo, - consensus_hash: consensus_hash.clone(), + consensus_hash: *consensus_hash, vtxindex: 0, txid: Txid([0u8; 32]), block_height: 0, @@ -3660,14 +3602,14 @@ impl RelayerThread { for (stacks_bhh, (assembled_block, microblock_privkey)) in last_mined_blocks.into_iter() { if assembled_block.burn_block_height < burn_height { debug!( - "Stale mined block: {} (as of {},{})", - &stacks_bhh, &assembled_block.burn_hash, assembled_block.burn_block_height + "Stale mined block: {stacks_bhh} (as of {},{})", + &assembled_block.burn_hash, assembled_block.burn_block_height ); continue; } debug!( - "Mined block in-flight: {} (as of {},{})", - &stacks_bhh, &assembled_block.burn_hash, assembled_block.burn_block_height + "Mined block in-flight: {stacks_bhh} (as of {},{})", + &assembled_block.burn_hash, assembled_block.burn_block_height ); ret.insert(stacks_bhh, (assembled_block, microblock_privkey)); } @@ -3676,14 +3618,14 @@ impl RelayerThread { /// Create the block miner thread state. 
/// Only proceeds if all of the following are true: - /// * the miner is not blocked - /// * last_burn_block corresponds to the canonical sortition DB's chain tip - /// * the time of issuance is sufficiently recent - /// * there are no unprocessed stacks blocks in the staging DB - /// * the relayer has already tried a download scan that included this sortition (which, if a - /// block was found, would have placed it into the staging DB and marked it as - /// unprocessed) - /// * a miner thread is not running already + /// * The miner is not blocked + /// * `last_burn_block` corresponds to the canonical sortition DB's chain tip + /// * The time of issuance is sufficiently recent + /// * There are no unprocessed stacks blocks in the staging DB + /// * The relayer has already tried a download scan that included this sortition (which, if a + /// block was found, would have placed it into the staging DB and marked it as + /// unprocessed) + /// * A miner thread is not running already fn create_block_miner( &mut self, registered_key: RegisteredKey, @@ -3724,16 +3666,15 @@ impl RelayerThread { } } - let burn_header_hash = last_burn_block.burn_header_hash.clone(); + let burn_header_hash = last_burn_block.burn_header_hash; let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); + let burn_chain_tip = burn_chain_sn.burn_header_hash; if burn_chain_tip != burn_header_hash { debug!( - "Relayer: Drop stale RunTenure for {}: current sortition is for {}", - &burn_header_hash, &burn_chain_tip + "Relayer: Drop stale RunTenure for {burn_header_hash}: current sortition is for {burn_chain_tip}" ); self.globals.counters.bump_missed_tenures(); return None; @@ -3749,8 +3690,7 @@ impl RelayerThread { ); if has_unprocessed { debug!( - "Relayer: Drop RunTenure for {} because there are fewer than {} pending blocks", - &burn_header_hash, + "Relayer: Drop RunTenure for {burn_header_hash} because there are fewer than {} pending blocks", self.burnchain.pox_constants.prepare_length - 1 ); return None; @@ -3780,7 +3720,7 @@ impl RelayerThread { // if we're still mining on this burn block, then do nothing if self.miner_thread.is_some() { - debug!("Relayer: will NOT run tenure since miner thread is already running for burn tip {}", &burn_chain_tip); + debug!("Relayer: will NOT run tenure since miner thread is already running for burn tip {burn_chain_tip}"); return None; } @@ -3797,6 +3737,7 @@ impl RelayerThread { /// Try to start up a block miner thread with this given VRF key and current burnchain tip. /// Returns true if the thread was started; false if it was not (for any reason) + #[allow(clippy::incompatible_msrv)] pub fn block_miner_thread_try_start( &mut self, registered_key: RegisteredKey, @@ -3827,7 +3768,7 @@ impl RelayerThread { .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { if let Err(e) = miner_thread_state.send_mock_miner_messages() { - warn!("Failed to send mock miner messages: {}", e); + warn!("Failed to send mock miner messages: {e}"); } miner_thread_state.run_tenure() }) @@ -3898,11 +3839,13 @@ impl RelayerThread { true } - /// Start up a microblock miner thread if we can: - /// * no miner thread must be running already - /// * the miner must not be blocked - /// * we must have won the sortition on the stacks chain tip - /// Returns true if the thread was started; false if not. 
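`block_miner_thread_try_start` above spawns the miner through `thread::Builder` with an explicit stack size (`BLOCK_PROCESSOR_STACK_SIZE`, 32 MB earlier in this file). The spawn pattern in isolation, with an illustrative thread name:

```rust
use std::thread;

fn main() {
    // Mirrors the miner-thread spawn pattern: a named thread with an
    // explicit stack size (32 MB, matching BLOCK_PROCESSOR_STACK_SIZE).
    let handle = thread::Builder::new()
        .name("miner-thread-sketch".into())
        .stack_size(32 * 1024 * 1024)
        .spawn(|| {
            // ... run_tenure() would go here ...
            "tenure finished"
        })
        .expect("FATAL: failed to spawn miner thread");

    println!("{}", handle.join().expect("miner thread panicked"));
}
```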
+ /// Start up a microblock miner thread if possible: + /// * No miner thread must be running already + /// * The miner must not be blocked + /// * We must have won the sortition on the Stacks chain tip + /// + /// Returns `true` if the thread was started; `false` if not. + #[allow(clippy::incompatible_msrv)] pub fn microblock_miner_thread_try_start(&mut self) -> bool { let miner_tip = match self.miner_tip.as_ref() { Some(tip) => tip.clone(), @@ -3948,10 +3891,7 @@ impl RelayerThread { let parent_consensus_hash = &miner_tip.consensus_hash; let parent_block_hash = &miner_tip.block_hash; - debug!( - "Relayer: Run microblock tenure for {}/{}", - parent_consensus_hash, parent_block_hash - ); + debug!("Relayer: Run microblock tenure for {parent_consensus_hash}/{parent_block_hash}"); let Some(mut microblock_thread_state) = MicroblockMinerThread::from_relayer_thread(self) else { @@ -4003,8 +3943,7 @@ impl RelayerThread { last_mined_block.burn_block_height, &self.last_mined_blocks, ) - .len() - == 0 + .is_empty() { // first time we've mined a block in this burnchain block debug!( @@ -4019,8 +3958,8 @@ impl RelayerThread { &last_mined_block.anchored_block.block_hash() ); - let bhh = last_mined_block.burn_hash.clone(); - let orig_bhh = last_mined_block.orig_burn_hash.clone(); + let bhh = last_mined_block.burn_hash; + let orig_bhh = last_mined_block.orig_burn_hash; let tenure_begin = last_mined_block.tenure_begin; self.last_mined_blocks.insert( @@ -4033,11 +3972,9 @@ impl RelayerThread { .set_ongoing_commit(ongoing_commit_opt); debug!( - "Relayer: RunTenure finished at {} (in {}ms) targeting {} (originally {})", + "Relayer: RunTenure finished at {} (in {}ms) targeting {bhh} (originally {orig_bhh})", self.last_tenure_issue_time, - self.last_tenure_issue_time.saturating_sub(tenure_begin), - &bhh, - &orig_bhh + self.last_tenure_issue_time.saturating_sub(tenure_begin) ); // this stacks block confirms all in-flight microblocks we know about, @@ -4058,7 +3995,7 @@ impl RelayerThread { let num_mblocks = chainstate .unconfirmed_state .as_ref() - .map(|ref unconfirmed| unconfirmed.num_microblocks()) + .map(|unconfirmed| unconfirmed.num_microblocks()) .unwrap_or(0); (processed_unconfirmed_state, num_mblocks) @@ -4066,11 +4003,9 @@ impl RelayerThread { ); info!( - "Mined one microblock: {} seq {} txs {} (total processed: {})", - µblock_hash, + "Mined one microblock: {microblock_hash} seq {} txs {} (total processed: {num_mblocks})", next_microblock.header.sequence, - next_microblock.txs.len(), - num_mblocks + next_microblock.txs.len() ); self.globals.counters.set_microblocks_processed(num_mblocks); @@ -4090,8 +4025,7 @@ impl RelayerThread { next_microblock, ) { error!( - "Failure trying to broadcast microblock {}: {}", - microblock_hash, e + "Failure trying to broadcast microblock {microblock_hash}: {e}" ); } @@ -4116,7 +4050,7 @@ impl RelayerThread { self.mined_stacks_block = false; } Err(e) => { - warn!("Relayer: Failed to mine next microblock: {:?}", &e); + warn!("Relayer: Failed to mine next microblock: {e:?}"); // switch back to block mining self.mined_stacks_block = false; @@ -4134,14 +4068,16 @@ impl RelayerThread { None } - /// Try to join with the miner thread. If we succeed, join the thread and return true. - /// Otherwise, if the thread is still running, return false; + /// Try to join with the miner thread. If successful, join the thread and return `true`. + /// Otherwise, if the thread is still running, return `false`. 
+ /// /// Updates internal state gleaned from the miner, such as: - /// * new stacks block data - /// * new keychain state - /// * new metrics - /// * new unconfirmed state - /// Returns true if joined; false if not. + /// * New Stacks block data + /// * New keychain state + /// * New metrics + /// * New unconfirmed state + /// + /// Returns `true` if joined; `false` if not. pub fn miner_thread_try_join(&mut self) -> bool { if let Some(thread_handle) = self.miner_thread.take() { let new_thread_handle = self.inner_miner_thread_try_join(thread_handle); @@ -4155,28 +4091,22 @@ impl RelayerThread { let mut f = match fs::File::open(path) { Ok(f) => f, Err(e) => { - warn!("Could not open {}: {:?}", &path, &e); + warn!("Could not open {path}: {e:?}"); return None; } }; let mut registered_key_bytes = vec![]; if let Err(e) = f.read_to_end(&mut registered_key_bytes) { - warn!( - "Failed to read registered key bytes from {}: {:?}", - path, &e - ); + warn!("Failed to read registered key bytes from {path}: {e:?}"); return None; } let Ok(registered_key) = serde_json::from_slice(®istered_key_bytes) else { - warn!( - "Did not load registered key from {}: could not decode JSON", - &path - ); + warn!("Did not load registered key from {path}: could not decode JSON"); return None; }; - info!("Loaded registered key from {}", &path); + info!("Loaded registered key from {path}"); Some(registered_key) } @@ -4193,7 +4123,7 @@ impl RelayerThread { RelayerDirective::RegisterKey(last_burn_block) => { let mut saved_key_opt = None; if let Some(path) = self.config.miner.activated_vrf_key_path.as_ref() { - saved_key_opt = Self::load_saved_vrf_key(&path); + saved_key_opt = Self::load_saved_vrf_key(path); } if let Some(saved_key) = saved_key_opt { self.globals.resume_leader_key(saved_key); @@ -4266,8 +4196,8 @@ impl ParentStacksBlockInfo { ) -> Result { let stacks_tip_header = StacksChainState::get_anchored_block_header_info( chain_state.db(), - &mine_tip_ch, - &mine_tip_bh, + mine_tip_ch, + mine_tip_bh, ) .unwrap() .ok_or_else(|| { @@ -4335,9 +4265,9 @@ impl ParentStacksBlockInfo { return Err(Error::BurnchainTipChanged); } - debug!("Mining tenure's last consensus hash: {} (height {} hash {}), stacks tip consensus hash: {} (height {} hash {})", + debug!("Mining tenure's last consensus hash: {} (height {} hash {}), stacks tip consensus hash: {mine_tip_ch} (height {} hash {})", &check_burn_block.consensus_hash, check_burn_block.block_height, &check_burn_block.burn_header_hash, - mine_tip_ch, parent_snapshot.block_height, &parent_snapshot.burn_header_hash); + parent_snapshot.block_height, &parent_snapshot.burn_header_hash); let coinbase_nonce = { let principal = miner_address.into(); @@ -4349,8 +4279,7 @@ impl ParentStacksBlockInfo { ) .unwrap_or_else(|| { panic!( - "BUG: stacks tip block {}/{} no longer exists after we queried it", - mine_tip_ch, mine_tip_bh + "BUG: stacks tip block {mine_tip_ch}/{mine_tip_bh} no longer exists after we queried it" ) }); account.nonce @@ -4358,9 +4287,9 @@ impl ParentStacksBlockInfo { Ok(ParentStacksBlockInfo { stacks_parent_header: stacks_tip_header, - parent_consensus_hash: mine_tip_ch.clone(), + parent_consensus_hash: *mine_tip_ch, parent_block_burn_height: parent_block_height, - parent_block_total_burn: parent_block_total_burn, + parent_block_total_burn, parent_winning_vtxindex, coinbase_nonce, }) @@ -4412,16 +4341,14 @@ impl PeerThread { .make_cost_metric() .unwrap_or_else(|| Box::new(UnitMetric)); - let mempool = MemPoolDB::open( + MemPoolDB::open( config.is_mainnet(), 
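`load_saved_vrf_key` above reads the key file into a buffer and decodes it with `serde_json`, warning and returning `None` on any failure. A compact equivalent, assuming `serde`/`serde_json` with the derive feature; `SavedKey` is a hypothetical stand-in for `RegisteredKey`:

```rust
use serde::Deserialize;
use std::fs;

/// Hypothetical stand-in for the node's `RegisteredKey`.
#[derive(Deserialize, Debug)]
struct SavedKey {
    block_height: u64,
    op_vtxindex: u32,
}

fn load_saved_key(path: &str) -> Option<SavedKey> {
    // fs::read folds open + read_to_end into one call.
    let bytes = fs::read(path)
        .map_err(|e| eprintln!("Could not read {path}: {e:?}"))
        .ok()?;
    let Ok(key) = serde_json::from_slice(&bytes) else {
        eprintln!("Did not load key from {path}: could not decode JSON");
        return None;
    };
    Some(key)
}

fn main() {
    println!("{:?}", load_saved_key("/tmp/activated_vrf_key.json"));
}
```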
config.burnchain.chain_id, &config.get_chainstate_path_str(), cost_estimator, metric, ) - .expect("Database failure opening mempool"); - - mempool + .expect("Database failure opening mempool") } /// Instantiate the p2p thread. @@ -4531,6 +4458,7 @@ impl PeerThread { /// Run one pass of the p2p/http state machine /// Return true if we should continue running passes; false if not + #[allow(clippy::borrowed_box)] pub fn run_one_pass( &mut self, indexer: &B, @@ -4542,12 +4470,11 @@ impl PeerThread { ) -> bool { // initial block download? let ibd = self.globals.sync_comms.get_ibd(); - let download_backpressure = self.results_with_data.len() > 0; + let download_backpressure = !self.results_with_data.is_empty(); let poll_ms = if !download_backpressure && self.get_network().has_more_downloads() { // keep getting those blocks -- drive the downloader state-machine debug!( - "P2P: backpressure: {}, more downloads: {}", - download_backpressure, + "P2P: backpressure: {download_backpressure}, more downloads: {}", self.get_network().has_more_downloads() ); 1 @@ -4566,11 +4493,7 @@ impl PeerThread { // NOTE: handler_args must be created such that it outlives the inner net.run() call and // doesn't ref anything within p2p_thread. let handler_args = RPCHandlerArgs { - exit_at_block_height: p2p_thread - .config - .burnchain - .process_exit_at_block_height - .clone(), + exit_at_block_height: p2p_thread.config.burnchain.process_exit_at_block_height, genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) .unwrap(), event_observer: Some(event_dispatcher), @@ -4635,7 +4558,7 @@ impl PeerThread { Err(e) => { // this is only reachable if the network is not instantiated correctly -- // i.e. you didn't connect it - panic!("P2P: Failed to process network dispatch: {:?}", &e); + panic!("P2P: Failed to process network dispatch: {e:?}"); } }; @@ -4697,9 +4620,8 @@ impl StacksNode { pub(crate) fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { if let Some(ast_precheck_size_height) = config.burnchain.ast_precheck_size_height { info!( - "Override burnchain height of {:?} to {}", - ASTRules::PrecheckSize, - ast_precheck_size_height + "Override burnchain height of {:?} to {ast_precheck_size_height}", + ASTRules::PrecheckSize ); let mut tx = sortdb .tx_begin() @@ -4726,32 +4648,32 @@ impl StacksNode { .make_cost_metric() .unwrap_or_else(|| Box::new(UnitMetric)); - let mempool = MemPoolDB::open( + MemPoolDB::open( config.is_mainnet(), config.burnchain.chain_id, &config.get_chainstate_path_str(), cost_estimator, metric, ) - .expect("BUG: failed to instantiate mempool"); - - mempool + .expect("BUG: failed to instantiate mempool") } - /// Set up the Peer DB and update any soft state from the config file. This includes: - /// * blacklisted/whitelisted nodes - /// * node keys - /// * bootstrap nodes - /// Returns the instantiated PeerDB + /// Set up the Peer DB and update any soft state from the config file. This includes: + /// * Blacklisted/whitelisted nodes + /// * Node keys + /// * Bootstrap nodes + /// + /// Returns the instantiated `PeerDB`. + /// /// Panics on failure. 
fn setup_peer_db( config: &Config, burnchain: &Burnchain, stackerdb_contract_ids: &[QualifiedContractIdentifier], ) -> PeerDB { - let data_url = UrlString::try_from(format!("{}", &config.node.data_url)).unwrap(); + let data_url = UrlString::try_from(config.node.data_url.to_string()).unwrap(); let initial_neighbors = config.node.bootstrap_node.clone(); - if initial_neighbors.len() > 0 { + if !initial_neighbors.is_empty() { info!( "Will bootstrap from peers {}", VecDisplay(&initial_neighbors) @@ -4778,7 +4700,7 @@ config.burnchain.chain_id, burnchain.network_id, Some(node_privkey), - config.connection_options.private_key_lifetime.clone(), + config.connection_options.private_key_lifetime, PeerAddress::from_socketaddr(&p2p_addr), p2p_sock.port(), data_url, @@ -4787,23 +4709,19 @@ stackerdb_contract_ids, ) .map_err(|e| { - eprintln!( - "Failed to open {}: {:?}", - &config.get_peer_db_file_path(), - &e - ); + eprintln!("Failed to open {}: {e:?}", &config.get_peer_db_file_path()); panic!(); }) .unwrap(); // allow all bootstrap nodes { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); for initial_neighbor in initial_neighbors.iter() { // update peer in case public key changed - PeerDB::update_peer(&mut tx, &initial_neighbor).unwrap(); + PeerDB::update_peer(&tx, initial_neighbor).unwrap(); PeerDB::set_allow_peer( - &mut tx, + &tx, initial_neighbor.addr.network_id, &initial_neighbor.addr.addrbytes, initial_neighbor.addr.port, @@ -4820,10 +4738,10 @@ // deny all config-denied peers { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); for denied in config.node.deny_nodes.iter() { PeerDB::set_deny_peer( - &mut tx, + &tx, denied.addr.network_id, &denied.addr.addrbytes, denied.addr.port, @@ -4836,9 +4754,9 @@ // update services to indicate we can support mempool sync and stackerdb { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); PeerDB::set_local_services( - &mut tx, + &tx, (ServiceFlags::RPC as u16) | (ServiceFlags::RELAY as u16) | (ServiceFlags::STACKERDB as u16), @@ -4863,11 +4781,12 @@ ) .expect("Error while instantiating sortition db"); - let epochs = SortitionDB::get_stacks_epochs(sortdb.conn()) + let epochs_vec = SortitionDB::get_stacks_epochs(sortdb.conn()) .expect("Error while loading stacks epochs"); + let epochs = EpochList::new(&epochs_vec); let view = { - let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("Failed to get sortition tip"); SortitionDB::get_burnchain_view(&sortdb.index_conn(), &burnchain, &sortition_tip) .unwrap() }; @@ -4908,16 +4827,20 @@ stackerdb_machines.insert(contract_id, (stackerdb_config, stacker_db_sync)); } let peerdb = Self::setup_peer_db(config, &burnchain, &stackerdb_contract_ids); + let burnchain_db = burnchain + .open_burnchain_db(false) + .expect("Failed to open burnchain DB"); let local_peer = match PeerDB::get_local_peer(peerdb.conn()) { Ok(local_peer) => local_peer, _ => panic!("Unable to retrieve local peer"), }; - let p2p_net = PeerNetwork::new( + PeerNetwork::new( peerdb, atlasdb, stackerdbs, + burnchain_db, local_peer, config.burnchain.peer_version, burnchain, view, config.connection_options.clone(), stackerdb_machines, epochs, - ); - - p2p_net + ) } /// Main loop of the relayer.
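The peer-DB hunks above drop `mut` from the `tx` bindings and pass `&tx` rather than `&mut tx`, i.e. the writer methods now take the transaction by shared reference. Plain `rusqlite` (the SQLite crate underneath stacks-core) has the same shape: `transaction()` needs `&mut Connection`, but `execute` on the transaction takes `&self`. The schema and values below are illustrative, not the real peer DB:

```rust
use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    let mut conn = Connection::open_in_memory()?;
    conn.execute("CREATE TABLE peers (port INTEGER)", [])?;

    // `transaction()` requires `&mut conn`, but the Transaction it returns
    // can be used through a shared reference: `execute` takes `&self`.
    let tx = conn.transaction()?;
    tx.execute("INSERT INTO peers (port) VALUES (?1)", rusqlite::params![20444])?;
    tx.execute("INSERT INTO peers (port) VALUES (?1)", rusqlite::params![20443])?;
    tx.commit()?;
    Ok(())
}
```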
@@ -5042,7 +4963,7 @@ impl StacksNode { .get_miner_address(StacksEpochId::Epoch21, &public_key); let miner_addr_str = addr2str(&miner_addr); let _ = monitoring::set_burnchain_signer(BurnchainSigner(miner_addr_str)).map_err(|e| { - warn!("Failed to set global burnchain signer: {:?}", &e); + warn!("Failed to set global burnchain signer: {e:?}"); e }); } @@ -5223,9 +5144,9 @@ impl StacksNode { .globals .relay_send .send(RelayerDirective::ProcessTenure( - snapshot.consensus_hash.clone(), - snapshot.parent_burn_header_hash.clone(), - snapshot.winning_stacks_block_hash.clone(), + snapshot.consensus_hash, + snapshot.parent_burn_header_hash, + snapshot.winning_stacks_block_hash, )) .is_ok(); } @@ -5266,17 +5187,15 @@ impl StacksNode { for op in block_commits.into_iter() { if op.txid == block_snapshot.winning_block_txid { info!( - "Received burnchain block #{} including block_commit_op (winning) - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash + "Received burnchain block #{block_height} including block_commit_op (winning) - {} ({})", + op.apparent_sender, &op.block_header_hash ); last_sortitioned_block = Some((block_snapshot.clone(), op.vtxindex)); - } else { - if self.is_miner { - info!( - "Received burnchain block #{} including block_commit_op - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash - ); - } + } else if self.is_miner { + info!( + "Received burnchain block #{block_height} including block_commit_op - {} ({})", + op.apparent_sender, &op.block_header_hash + ); } } @@ -5289,8 +5208,7 @@ impl StacksNode { let num_key_registers = key_registers.len(); debug!( - "Processed burnchain state at height {}: {} leader keys, {} block-commits (ibd = {})", - block_height, num_key_registers, num_block_commits, ibd + "Processed burnchain state at height {block_height}: {num_key_registers} leader keys, {num_block_commits} block-commits (ibd = {ibd})" ); // save the registered VRF key @@ -5306,28 +5224,28 @@ impl StacksNode { return ret; }; - info!("Activated VRF key; saving to {}", &path); + info!("Activated VRF key; saving to {path}"); let Ok(key_json) = serde_json::to_string(&activated_key) else { warn!("Failed to serialize VRF key"); return ret; }; - let mut f = match fs::File::create(&path) { + let mut f = match fs::File::create(path) { Ok(f) => f, Err(e) => { - warn!("Failed to create {}: {:?}", &path, &e); + warn!("Failed to create {path}: {e:?}"); return ret; } }; - if let Err(e) = f.write_all(key_json.as_str().as_bytes()) { - warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); + if let Err(e) = f.write_all(key_json.as_bytes()) { + warn!("Failed to write activated VRF key to {path}: {e:?}"); return ret; } - info!("Saved activated VRF key to {}", &path); - return ret; + info!("Saved activated VRF key to {path}"); + ret } /// Join all inner threads diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 1895912ba5..146441d2ae 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -28,7 +28,7 @@ use stacks::chainstate::stacks::{ TransactionAnchorMode, TransactionPayload, TransactionVersion, }; use stacks::core::mempool::MemPoolDB; -use stacks::core::STACKS_EPOCH_2_1_MARKER; +use stacks::core::{EpochList, STACKS_EPOCH_2_1_MARKER}; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; use stacks::net::atlas::{AtlasConfig, AtlasDB, AttachmentInstance}; @@ -151,6 +151,7 @@ pub fn get_names(use_test_chainstate_data: bool) -> Box x, Err(e) => { - 
warn!("Error while connecting burnchain db in peer loop: {}", e); + warn!("Error while connecting burnchain db in peer loop: {e}"); thread::sleep(time::Duration::from_secs(1)); continue; } @@ -203,7 +204,7 @@ fn spawn_peer( ) { Ok(x) => x, Err(e) => { - warn!("Error while connecting chainstate db in peer loop: {}", e); + warn!("Error while connecting chainstate db in peer loop: {e}"); thread::sleep(time::Duration::from_secs(1)); continue; } @@ -221,7 +222,7 @@ fn spawn_peer( ) { Ok(x) => x, Err(e) => { - warn!("Error while connecting to mempool db in peer loop: {}", e); + warn!("Error while connecting to mempool db in peer loop: {e}"); thread::sleep(time::Duration::from_secs(1)); continue; } @@ -268,7 +269,7 @@ pub fn use_test_genesis_chainstate(config: &Config) -> bool { impl Node { /// Instantiate and initialize a new node, given a config - pub fn new(config: Config, boot_block_exec: Box ()>) -> Self { + pub fn new(config: Config, boot_block_exec: Box) -> Self { let use_test_genesis_data = if config.burnchain.mode == "mocknet" { use_test_genesis_chainstate(&config) } else { @@ -318,9 +319,8 @@ impl Node { let (chain_state, receipts) = match chain_state_result { Ok(res) => res, Err(err) => panic!( - "Error while opening chain state at path {}: {:?}", - config.get_chainstate_path_str(), - err + "Error while opening chain state at path {}: {err:?}", + config.get_chainstate_path_str() ), }; @@ -401,24 +401,25 @@ impl Node { ) .expect("Error while instantiating burnchain db"); - let epochs = SortitionDB::get_stacks_epochs(sortdb.conn()) + let epochs_vec = SortitionDB::get_stacks_epochs(sortdb.conn()) .expect("Error while loading stacks epochs"); + let epochs = EpochList::new(&epochs_vec); Config::assert_valid_epoch_settings(&burnchain, &epochs); let view = { - let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("Failed to get sortition tip"); SortitionDB::get_burnchain_view(&sortdb.index_conn(), &burnchain, &sortition_tip) .unwrap() }; // create a new peerdb - let data_url = UrlString::try_from(format!("{}", self.config.node.data_url)).unwrap(); + let data_url = UrlString::try_from(self.config.node.data_url.to_string()).unwrap(); let initial_neighbors = self.config.node.bootstrap_node.clone(); - println!("BOOTSTRAP WITH {:?}", initial_neighbors); + println!("BOOTSTRAP WITH {initial_neighbors:?}"); let rpc_sock: SocketAddr = self.config.node.rpc_bind.parse().unwrap_or_else(|_| { @@ -452,7 +453,7 @@ impl Node { self.config.burnchain.chain_id, burnchain.network_id, Some(node_privkey), - self.config.connection_options.private_key_lifetime.clone(), + self.config.connection_options.private_key_lifetime, PeerAddress::from_socketaddr(&p2p_addr), p2p_sock.port(), data_url, @@ -464,10 +465,10 @@ impl Node { println!("DENY NEIGHBORS {:?}", &self.config.node.deny_nodes); { - let mut tx = peerdb.tx_begin().unwrap(); + let tx = peerdb.tx_begin().unwrap(); for denied in self.config.node.deny_nodes.iter() { PeerDB::set_deny_peer( - &mut tx, + &tx, denied.addr.network_id, &denied.addr.addrbytes, denied.addr.port, @@ -488,12 +489,16 @@ impl Node { }; let event_dispatcher = self.event_dispatcher.clone(); - let exit_at_block_height = self.config.burnchain.process_exit_at_block_height.clone(); + let exit_at_block_height = self.config.burnchain.process_exit_at_block_height; + let burnchain_db = burnchain + .open_burnchain_db(false) + .expect("Failed to open burnchain DB"); let p2p_net = 
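`spawn_peer` above retries its DB connections in a loop with a one-second sleep, since the chainstate and mempool DBs may not exist yet when the thread starts. The retry shape, reduced to a sketch with a stand-in connector:

```rust
use std::thread;
use std::time::Duration;

/// Illustrative stand-in for "open the mempool/chainstate DB".
fn try_connect(attempt: u32) -> Result<&'static str, &'static str> {
    if attempt < 3 { Err("db not ready") } else { Ok("connected") }
}

fn main() {
    let mut attempt = 0;
    let conn = loop {
        match try_connect(attempt) {
            Ok(c) => break c,
            Err(e) => {
                // Mirrors the peer loop: warn, back off briefly, try again.
                eprintln!("Error while connecting in peer loop: {e}");
                thread::sleep(Duration::from_secs(1));
                attempt += 1;
            }
        }
    };
    println!("{conn}");
}
```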
PeerNetwork::new( peerdb, atlasdb, stackerdbs, + burnchain_db, local_peer, self.config.burnchain.peer_version, burnchain.clone(), @@ -577,9 +582,9 @@ impl Node { // Registered key has been mined new_key = Some(RegisteredKey { vrf_public_key: op.public_key.clone(), - block_height: op.block_height as u64, - op_vtxindex: op.vtxindex as u32, - target_block_height: (op.block_height as u64) - 1, + block_height: op.block_height, + op_vtxindex: op.vtxindex, + target_block_height: op.block_height - 1, memo: op.memo.clone(), }); } @@ -649,7 +654,7 @@ impl Node { burnchain.pox_constants, ) .expect("Error while opening sortition db"); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: failed to query canonical burn chain tip"); // Generates a proof out of the sortition hash provided in the params. @@ -734,7 +739,7 @@ impl Node { anchored_block_from_ongoing_tenure.header.block_hash(), burn_fee, ®istered_key, - &burnchain_tip, + burnchain_tip, VRFSeed::from_proof(&vrf_proof), ); @@ -784,15 +789,13 @@ impl Node { ) .unwrap_or_else(|_| { panic!( - "BUG: could not query chainstate to find parent consensus hash of {}/{}", - consensus_hash, + "BUG: could not query chainstate to find parent consensus hash of {consensus_hash}/{}", &anchored_block.block_hash() ) }) .unwrap_or_else(|| { panic!( - "BUG: no such parent of block {}/{}", - consensus_hash, + "BUG: no such parent of block {consensus_hash}/{}", &anchored_block.block_hash() ) }); @@ -802,7 +805,7 @@ impl Node { .preprocess_anchored_block( &ic, consensus_hash, - &anchored_block, + anchored_block, &parent_consensus_hash, 0, ) @@ -813,7 +816,7 @@ impl Node { let res = self .chain_state .preprocess_streamed_microblock( - &consensus_hash, + consensus_hash, &anchored_block.block_hash(), microblock, ) @@ -847,33 +850,30 @@ impl Node { ) }; match process_blocks_at_tip { - Err(e) => panic!("Error while processing block - {:?}", e), + Err(e) => panic!("Error while processing block - {e:?}"), Ok(ref mut blocks) => { - if blocks.len() == 0 { + if blocks.is_empty() { break; } else { for block in blocks.iter() { - match block { - (Some(epoch_receipt), _) => { - let attachments_instances = - self.get_attachment_instances(epoch_receipt, &atlas_config); - if !attachments_instances.is_empty() { - for new_attachment in attachments_instances.into_iter() { - if let Err(e) = - atlas_db.queue_attachment_instance(&new_attachment) - { - warn!( - "Atlas: Error writing attachment instance to DB"; - "err" => ?e, - "index_block_hash" => %new_attachment.index_block_hash, - "contract_id" => %new_attachment.contract_id, - "attachment_index" => %new_attachment.attachment_index, - ); - } + if let (Some(epoch_receipt), _) = block { + let attachments_instances = + self.get_attachment_instances(epoch_receipt, &atlas_config); + if !attachments_instances.is_empty() { + for new_attachment in attachments_instances.into_iter() { + if let Err(e) = + atlas_db.queue_attachment_instance(&new_attachment) + { + warn!( + "Atlas: Error writing attachment instance to DB"; + "err" => ?e, + "index_block_hash" => %new_attachment.index_block_hash, + "contract_id" => %new_attachment.contract_id, + "attachment_index" => %new_attachment.attachment_index, + ); } } } - _ => {} } } @@ -990,7 +990,7 @@ impl Node { BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { public_key: vrf_public_key, memo: vec![], - consensus_hash: consensus_hash.clone(), + consensus_hash: *consensus_hash, vtxindex: 1, 
            txid,
            block_height: 0,
diff --git a/testnet/stacks-node/src/operations.rs b/testnet/stacks-node/src/operations.rs
index 4680098d2b..7e26fb42e2 100644
--- a/testnet/stacks-node/src/operations.rs
+++ b/testnet/stacks-node/src/operations.rs
@@ -31,8 +31,7 @@ impl BurnchainOpSigner {
     }
 
     pub fn get_public_key(&mut self) -> Secp256k1PublicKey {
-        let public_key = Secp256k1PublicKey::from_private(&self.secret_key);
-        public_key
+        Secp256k1PublicKey::from_private(&self.secret_key)
     }
 
     pub fn sign_message(&mut self, hash: &[u8]) -> Option<MessageSignature> {
@@ -44,7 +43,7 @@ impl BurnchainOpSigner {
         let signature = match self.secret_key.sign(hash) {
             Ok(r) => r,
             Err(e) => {
-                debug!("Secret key error: {:?}", &e);
+                debug!("Secret key error: {e:?}");
                 return None;
             }
         };
diff --git a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs
index 85ace37fa4..648c6d7470 100644
--- a/testnet/stacks-node/src/run_loop/boot_nakamoto.rs
+++ b/testnet/stacks-node/src/run_loop/boot_nakamoto.rs
@@ -137,8 +137,8 @@ impl BootRunLoop {
     /// node depending on the current burnchain height.
     pub fn start(&mut self, burnchain_opt: Option<Burnchain>, mine_start: u64) {
         match self.active_loop {
-            InnerLoops::Epoch2(_) => return self.start_from_neon(burnchain_opt, mine_start),
-            InnerLoops::Epoch3(_) => return self.start_from_naka(burnchain_opt, mine_start),
+            InnerLoops::Epoch2(_) => self.start_from_neon(burnchain_opt, mine_start),
+            InnerLoops::Epoch3(_) => self.start_from_naka(burnchain_opt, mine_start),
         }
     }
 
@@ -227,7 +227,7 @@ impl BootRunLoop {
                 // if loop exited, do the transition
                 info!("Epoch-3.0 boundary reached, stopping Epoch-2.x run loop");
                 neon_term_switch.store(false, Ordering::SeqCst);
-                return true
+                true
             })
     }
 
@@ -237,8 +237,9 @@ impl BootRunLoop {
            config.burnchain.get_bitcoin_network().1,
            config.burnchain.epochs.as_ref(),
        );
-        let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30)
-            .ok_or("No Epoch-3.0 defined")?];
+        let epoch_3 = epochs
+            .get(StacksEpochId::Epoch30)
+            .ok_or("No Epoch-3.0 defined")?;
         Ok(u64::from(burn_height) >= epoch_3.start_height - 1)
     }
 
diff --git a/testnet/stacks-node/src/run_loop/helium.rs b/testnet/stacks-node/src/run_loop/helium.rs
index 2922ce584a..c61581553c 100644
--- a/testnet/stacks-node/src/run_loop/helium.rs
+++ b/testnet/stacks-node/src/run_loop/helium.rs
@@ -21,10 +21,7 @@ impl RunLoop {
     }
 
     /// Sets up a runloop and node, given a config.
-    pub fn new_with_boot_exec(
-        config: Config,
-        boot_exec: Box<dyn FnOnce(&mut ClarityTx) -> ()>,
-    ) -> Self {
+    pub fn new_with_boot_exec(config: Config, boot_exec: Box<dyn FnOnce(&mut ClarityTx)>) -> Self {
         // Build node based on config
         let node = Node::new(config.clone(), boot_exec);
 
@@ -174,17 +171,14 @@ impl RunLoop {
                 None => None,
             };
 
-            match artifacts_from_tenure {
-                Some(ref artifacts) => {
-                    // Have each node receive artifacts from the current tenure
-                    self.node.commit_artifacts(
-                        &artifacts.anchored_block,
-                        &artifacts.parent_block,
-                        &mut burnchain,
-                        artifacts.burn_fee,
-                    );
-                }
-                None => {}
+            if let Some(artifacts) = &artifacts_from_tenure {
+                // Have each node receive artifacts from the current tenure
+                self.node.commit_artifacts(
+                    &artifacts.anchored_block,
+                    &artifacts.parent_block,
+                    &mut burnchain,
+                    artifacts.burn_fee,
+                );
             }
 
             let (new_burnchain_tip, _) = burnchain.sync(None)?;
diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs
index b2b9aa3f75..7990c04332 100644
--- a/testnet/stacks-node/src/run_loop/mod.rs
+++ b/testnet/stacks-node/src/run_loop/mod.rs
@@ -37,6 +37,7 @@ macro_rules! info_green {
info_green { }) } +#[allow(clippy::type_complexity)] pub struct RunLoopCallbacks { on_burn_chain_initialized: Option)>, on_new_burn_chain_state: Option, @@ -45,6 +46,12 @@ pub struct RunLoopCallbacks { on_new_tenure: Option, } +impl Default for RunLoopCallbacks { + fn default() -> Self { + Self::new() + } +} + impl RunLoopCallbacks { pub fn new() -> RunLoopCallbacks { RunLoopCallbacks { @@ -125,7 +132,7 @@ impl RunLoopCallbacks { match &tx.payload { TransactionPayload::Coinbase(..) => println!(" Coinbase"), TransactionPayload::SmartContract(contract, ..) => println!(" Publish smart contract\n**************************\n{:?}\n**************************", contract.code_body), - TransactionPayload::TokenTransfer(recipent, amount, _) => println!(" Transfering {} µSTX to {}", amount, recipent), + TransactionPayload::TokenTransfer(recipent, amount, _) => println!(" Transfering {amount} µSTX to {recipent}"), _ => println!(" {:?}", tx.payload) } } @@ -167,7 +174,7 @@ pub fn announce_boot_receipts( event_dispatcher: &mut EventDispatcher, chainstate: &StacksChainState, pox_constants: &PoxConstants, - boot_receipts: &Vec, + boot_receipts: &[StacksTransactionReceipt], ) { let block_header_0 = StacksChainState::get_genesis_header_info(chainstate.db()) .expect("FATAL: genesis block header not stored"); @@ -189,7 +196,7 @@ pub fn announce_boot_receipts( Txid([0x00; 32]), &[], None, - block_header_0.burn_header_hash.clone(), + block_header_0.burn_header_hash, block_header_0.burn_header_height, block_header_0.burn_header_timestamp, &ExecutionCost::zero(), diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index 04afdd79ee..16f5a12b2d 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -100,7 +100,7 @@ impl RunLoop { config, globals: None, coordinator_channels: Some(channels), - counters: counters.unwrap_or_else(|| Counters::new()), + counters: counters.unwrap_or_default(), should_keep_running, event_dispatcher, pox_watchdog: None, @@ -167,9 +167,8 @@ impl RunLoop { if self.config.node.miner { let keychain = Keychain::default(self.config.node.seed.clone()); let mut op_signer = keychain.generate_op_signer(); - match burnchain.create_wallet_if_dne() { - Err(e) => warn!("Error when creating wallet: {:?}", e), - _ => {} + if let Err(e) = burnchain.create_wallet_if_dne() { + warn!("Error when creating wallet: {e:?}"); } let mut btc_addrs = vec![( StacksEpochId::Epoch2_05, @@ -285,7 +284,6 @@ impl RunLoop { let mut atlas_config = AtlasConfig::new(self.config.is_mainnet()); let genesis_attachments = GenesisData::new(use_test_genesis_data) .read_name_zonefiles() - .into_iter() .map(|z| Attachment::new(z.zonefile_content.as_bytes().to_vec())) .collect(); atlas_config.genesis_attachments = Some(genesis_attachments); @@ -296,7 +294,7 @@ impl RunLoop { let moved_atlas_config = self.config.atlas.clone(); let moved_config = self.config.clone(); let moved_burnchain_config = burnchain_config.clone(); - let mut coordinator_dispatcher = self.event_dispatcher.clone(); + let coordinator_dispatcher = self.event_dispatcher.clone(); let atlas_db = AtlasDB::connect( moved_atlas_config.clone(), &self.config.get_atlas_db_file_path(), @@ -325,13 +323,12 @@ impl RunLoop { require_affirmed_anchor_blocks: moved_config .node .require_affirmed_anchor_blocks, - ..ChainsCoordinatorConfig::new() }; ChainsCoordinator::run( coord_config, chain_state_db, moved_burnchain_config, - &mut coordinator_dispatcher, + 
&coordinator_dispatcher, coordinator_receivers, moved_atlas_config, cost_estimator.as_deref_mut(), @@ -382,7 +379,7 @@ impl RunLoop { Some(sn) => sn, None => { debug!("No canonical stacks chain tip hash present"); - let sn = SortitionDB::get_first_block_snapshot(&sortdb.conn()) + let sn = SortitionDB::get_first_block_snapshot(sortdb.conn()) .expect("BUG: failed to get first-ever block snapshot"); sn } @@ -432,7 +429,7 @@ impl RunLoop { return; } Err(e) => { - error!("Error initializing burnchain: {}", e); + error!("Error initializing burnchain: {e}"); info!("Exiting stacks-node"); return; } @@ -477,7 +474,7 @@ impl RunLoop { // Make sure at least one sortition has happened, and make sure it's globally available let sortdb = burnchain.sortdb_mut(); let (rc_aligned_height, sn) = - RunLoop::get_reward_cycle_sortition_db_height(&sortdb, &burnchain_config); + RunLoop::get_reward_cycle_sortition_db_height(sortdb, &burnchain_config); let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height { // need at least one sortition to happen. @@ -525,10 +522,7 @@ impl RunLoop { burnchain.get_headers_height() - 1, ); - debug!( - "Runloop: Begin main runloop starting a burnchain block {}", - sortition_db_height - ); + debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); let mut last_tenure_sortition_height = 0; let mut poll_deadline = 0; @@ -576,11 +570,10 @@ impl RunLoop { // runloop will cause the PoX sync watchdog to wait until it believes that the node has // obtained all the Stacks blocks it can. debug!( - "Runloop: Download burnchain blocks up to reward cycle #{} (height {})", + "Runloop: Download burnchain blocks up to reward cycle #{} (height {target_burnchain_block_height})", burnchain_config .block_height_to_reward_cycle(target_burnchain_block_height) - .expect("FATAL: target burnchain block height does not have a reward cycle"), - target_burnchain_block_height; + .expect("FATAL: target burnchain block height does not have a reward cycle"); "total_burn_sync_percent" => %percent, "local_burn_height" => burnchain_tip.block_snapshot.block_height, "remote_tip_height" => remote_chain_height @@ -601,7 +594,7 @@ impl RunLoop { match burnchain.sync(Some(target_burnchain_block_height)) { Ok(x) => x, Err(e) => { - warn!("Runloop: Burnchain controller stopped: {}", e); + warn!("Runloop: Burnchain controller stopped: {e}"); continue; } }; @@ -615,15 +608,13 @@ impl RunLoop { if next_sortition_height != last_tenure_sortition_height { info!( - "Runloop: Downloaded burnchain blocks up to height {}; target height is {}; remote_chain_height = {} next_sortition_height = {}, sortition_db_height = {}", - burnchain_height, target_burnchain_block_height, remote_chain_height, next_sortition_height, sortition_db_height + "Runloop: Downloaded burnchain blocks up to height {burnchain_height}; target height is {target_burnchain_block_height}; remote_chain_height = {remote_chain_height} next_sortition_height = {next_sortition_height}, sortition_db_height = {sortition_db_height}" ); } if next_sortition_height > sortition_db_height { debug!( - "Runloop: New burnchain block height {} > {}", - next_sortition_height, sortition_db_height + "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}" ); let mut sort_count = 0; @@ -669,8 +660,7 @@ impl RunLoop { num_sortitions_in_last_cycle = sort_count; debug!( - "Runloop: Synchronized sortitions up to block height {} from {} (chain tip height is {}); {} sortitions", - next_sortition_height, 
sortition_db_height, burnchain_height, num_sortitions_in_last_cycle; + "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height}); {num_sortitions_in_last_cycle} sortitions" ); sortition_db_height = next_sortition_height; @@ -702,7 +692,7 @@ impl RunLoop { remote_chain_height, ); - debug!("Runloop: Advance target burnchain block height from {} to {} (sortition height {})", target_burnchain_block_height, next_target_burnchain_block_height, sortition_db_height); + debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); target_burnchain_block_height = next_target_burnchain_block_height; if sortition_db_height >= burnchain_height && !ibd { @@ -712,9 +702,7 @@ impl RunLoop { .unwrap_or(0); if canonical_stacks_tip_height < mine_start { info!( - "Runloop: Synchronized full burnchain, but stacks tip height is {}, and we are trying to boot to {}, not mining until reaching chain tip", - canonical_stacks_tip_height, - mine_start + "Runloop: Synchronized full burnchain, but stacks tip height is {canonical_stacks_tip_height}, and we are trying to boot to {mine_start}, not mining until reaching chain tip" ); } else { // once we've synced to the chain tip once, don't apply this check again. @@ -723,10 +711,15 @@ impl RunLoop { // at tip, and not downloading. proceed to mine. if last_tenure_sortition_height != sortition_db_height { - info!( - "Runloop: Synchronized full burnchain up to height {}. Proceeding to mine blocks", - sortition_db_height - ); + if is_miner { + info!( + "Runloop: Synchronized full burnchain up to height {sortition_db_height}. Proceeding to mine blocks" + ); + } else { + info!( + "Runloop: Synchronized full burnchain up to height {sortition_db_height}." + ); + } last_tenure_sortition_height = sortition_db_height; globals.raise_initiative("runloop-synced".to_string()); } diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index a18a61988b..5e021e50ab 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -93,6 +93,19 @@ impl Default for TestFlag { } } +#[cfg(test)] +impl TestFlag { + /// Set the test flag to the given value + pub fn set(&self, value: bool) { + *self.0.lock().unwrap() = Some(value); + } + + /// Get the test flag value. Defaults to false if the flag is not set. 
+ pub fn get(&self) -> bool { + self.0.lock().unwrap().unwrap_or(false) + } +} + #[derive(Clone, Default)] pub struct Counters { pub blocks_processed: RunLoopCounter, @@ -342,7 +355,7 @@ impl RunLoop { } } _ => { - let msg = format!("Graceful termination request received (signal `{}`), will complete the ongoing runloop cycles and terminate\n", sig_id); + let msg = format!("Graceful termination request received (signal `{sig_id}`), will complete the ongoing runloop cycles and terminate\n"); async_safe_write_stderr(&msg); keep_running_writer.store(false, Ordering::SeqCst); } @@ -353,7 +366,7 @@ impl RunLoop { if cfg!(test) || allow_err { info!("Error setting up signal handler, may have already been set"); } else { - panic!("FATAL: error setting termination handler - {}", e); + panic!("FATAL: error setting termination handler - {e}"); } } } @@ -369,9 +382,8 @@ impl RunLoop { if self.config.node.miner { let keychain = Keychain::default(self.config.node.seed.clone()); let mut op_signer = keychain.generate_op_signer(); - match burnchain.create_wallet_if_dne() { - Err(e) => warn!("Error when creating wallet: {:?}", e), - _ => {} + if let Err(e) = burnchain.create_wallet_if_dne() { + warn!("Error when creating wallet: {e:?}"); } let mut btc_addrs = vec![( StacksEpochId::Epoch2_05, @@ -462,7 +474,7 @@ impl RunLoop { panic!(); } Err(e) => { - panic!("FATAL: unable to query filesystem or databases: {:?}", &e); + panic!("FATAL: unable to query filesystem or databases: {e:?}"); } } @@ -476,13 +488,13 @@ impl RunLoop { Some(burnchain_tip) => { // database exists already, and has blocks -- just sync to its tip. let target_height = burnchain_tip.block_height + 1; - debug!("Burnchain DB exists and has blocks up to {}; synchronizing from where it left off up to {}", burnchain_tip.block_height, target_height); + debug!("Burnchain DB exists and has blocks up to {}; synchronizing from where it left off up to {target_height}", burnchain_tip.block_height); target_height } None => { // database does not exist yet let target_height = 1.max(burnchain_config.first_block_height + 1); - debug!("Burnchain DB does not exist or does not have blocks; synchronizing to first burnchain block height {}", target_height); + debug!("Burnchain DB does not exist or does not have blocks; synchronizing to first burnchain block height {target_height}"); target_height } }; @@ -490,22 +502,19 @@ impl RunLoop { burnchain_controller .start(Some(target_burnchain_block_height)) .map_err(|e| { - match e { - Error::CoordinatorClosed => { - if !should_keep_running.load(Ordering::SeqCst) { - info!("Shutdown initiated during burnchain initialization: {}", e); - return burnchain_error::ShutdownInitiated; - } - } - _ => {} + if matches!(e, Error::CoordinatorClosed) + && !should_keep_running.load(Ordering::SeqCst) + { + info!("Shutdown initiated during burnchain initialization: {e}"); + return burnchain_error::ShutdownInitiated; } - error!("Burnchain controller stopped: {}", e); + error!("Burnchain controller stopped: {e}"); panic!(); })?; // if the chainstate DBs don't exist, this will instantiate them if let Err(e) = burnchain_controller.connect_dbs() { - error!("Failed to connect to burnchain databases: {}", e); + error!("Failed to connect to burnchain databases: {e}"); panic!(); }; @@ -581,7 +590,6 @@ impl RunLoop { let mut atlas_config = AtlasConfig::new(self.config.is_mainnet()); let genesis_attachments = GenesisData::new(use_test_genesis_data) .read_name_zonefiles() - .into_iter() .map(|z| 
Attachment::new(z.zonefile_content.as_bytes().to_vec())) .collect(); atlas_config.genesis_attachments = Some(genesis_attachments); @@ -592,7 +600,7 @@ impl RunLoop { let moved_atlas_config = self.config.atlas.clone(); let moved_config = self.config.clone(); let moved_burnchain_config = burnchain_config.clone(); - let mut coordinator_dispatcher = self.event_dispatcher.clone(); + let coordinator_dispatcher = self.event_dispatcher.clone(); let atlas_db = AtlasDB::connect( moved_atlas_config.clone(), &self.config.get_atlas_db_file_path(), @@ -621,13 +629,12 @@ impl RunLoop { require_affirmed_anchor_blocks: moved_config .node .require_affirmed_anchor_blocks, - ..ChainsCoordinatorConfig::new() }; ChainsCoordinator::run( coord_config, chain_state_db, moved_burnchain_config, - &mut coordinator_dispatcher, + &coordinator_dispatcher, coordinator_receivers, moved_atlas_config, cost_estimator.as_deref_mut(), @@ -685,7 +692,7 @@ impl RunLoop { Some(sn) => sn, None => { debug!("No canonical stacks chain tip hash present"); - let sn = SortitionDB::get_first_block_snapshot(&sortdb.conn()) + let sn = SortitionDB::get_first_block_snapshot(sortdb.conn()) .expect("BUG: failed to get first-ever block snapshot"); sn } @@ -737,7 +744,7 @@ impl RunLoop { let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); let heaviest_affirmation_map = match static_get_heaviest_affirmation_map( - &burnchain, + burnchain, &indexer, &burnchain_db, sortdb, @@ -745,7 +752,7 @@ impl RunLoop { ) { Ok(am) => am, Err(e) => { - warn!("Failed to find heaviest affirmation map: {:?}", &e); + warn!("Failed to find heaviest affirmation map: {e:?}"); return; } }; @@ -761,7 +768,7 @@ impl RunLoop { match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { Ok(am) => am, Err(e) => { - warn!("Failed to find sortition affirmation map: {:?}", &e); + warn!("Failed to find sortition affirmation map: {e:?}"); return; } }; @@ -787,26 +794,24 @@ impl RunLoop { .find_divergence(&heaviest_affirmation_map) .is_some() { - debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map); + debug!("Drive burn block processing: possible PoX reorg (sortition tip: {sortition_tip_affirmation_map}, heaviest: {heaviest_affirmation_map})"); globals.coord().announce_new_burn_block(); } else if highest_sn.block_height == sn.block_height && sn.block_height == canonical_burnchain_tip.block_height { // need to force an affirmation reorg because there will be no more burn block // announcements. 
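Most hunks in this run loop, and across the whole patch, apply the same mechanical rewrite: Rust 2021 inline format-argument capture replacing positional arguments. A minimal standalone sketch of the pattern (the variable names here are illustrative, not taken from this codebase):

    fn main() {
        let block_height = 42u64;
        let err = "deadline exceeded";
        // Old style: positional arguments, easy to misalign with the string.
        println!("Processed burnchain block #{} ({:?})", block_height, err);
        // New style: the identifier is captured directly in the format string.
        // Only bare identifiers can be captured; field accesses such as
        // `op.apparent_sender` must stay positional, as in the hunks above.
        println!("Processed burnchain block #{block_height} ({err:?})");
    }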
- debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, burn height {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, sn.block_height); + debug!("Drive burn block processing: possible PoX reorg (sortition tip: {sortition_tip_affirmation_map}, heaviest: {heaviest_affirmation_map}, burn height {})", sn.block_height); globals.coord().announce_new_burn_block(); } debug!( - "Drive stacks block processing: possible PoX reorg (stacks tip: {}, heaviest: {})", - &stacks_tip_affirmation_map, &heaviest_affirmation_map + "Drive stacks block processing: possible PoX reorg (stacks tip: {stacks_tip_affirmation_map}, heaviest: {heaviest_affirmation_map})" ); globals.coord().announce_new_stacks_block(); } else { debug!( - "Drive stacks block processing: no need (stacks tip: {}, heaviest: {})", - &stacks_tip_affirmation_map, &heaviest_affirmation_map + "Drive stacks block processing: no need (stacks tip: {stacks_tip_affirmation_map}, heaviest: {heaviest_affirmation_map})" ); // announce a new stacks block to force the chains coordinator @@ -877,7 +882,7 @@ impl RunLoop { match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { Ok(am) => am, Err(e) => { - warn!("Failed to find sortition affirmation map: {:?}", &e); + warn!("Failed to find sortition affirmation map: {e:?}"); return; } }; @@ -885,7 +890,7 @@ impl RunLoop { let indexer = make_bitcoin_indexer(config, Some(globals.should_keep_running.clone())); let heaviest_affirmation_map = match static_get_heaviest_affirmation_map( - &burnchain, + burnchain, &indexer, &burnchain_db, sortdb, @@ -893,22 +898,22 @@ impl RunLoop { ) { Ok(am) => am, Err(e) => { - warn!("Failed to find heaviest affirmation map: {:?}", &e); + warn!("Failed to find heaviest affirmation map: {e:?}"); return; } }; let canonical_affirmation_map = match static_get_canonical_affirmation_map( - &burnchain, + burnchain, &indexer, &burnchain_db, sortdb, - &chain_state_db, + chain_state_db, &sn.sortition_id, ) { Ok(am) => am, Err(e) => { - warn!("Failed to find canonical affirmation map: {:?}", &e); + warn!("Failed to find canonical affirmation map: {e:?}"); return; } }; @@ -919,7 +924,7 @@ impl RunLoop { .is_some() || sn.block_height < highest_sn.block_height { - debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, {} = (heaviest_affirmation_map.len() as u64) { // we have unaffirmed PoX anchor blocks that are not yet processed in the sortition history - debug!("Drive burnchain processing: possible PoX reorg from unprocessed anchor block(s) (sortition tip: {}, heaviest: {}, canonical: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, &canonical_affirmation_map); + debug!("Drive burnchain processing: possible PoX reorg from unprocessed anchor block(s) (sortition tip: {sortition_tip_affirmation_map}, heaviest: {heaviest_affirmation_map}, canonical: {canonical_affirmation_map})"); globals.coord().announce_new_burn_block(); globals.coord().announce_new_stacks_block(); *last_announce_time = get_epoch_time_secs().into(); @@ -939,9 +944,7 @@ impl RunLoop { } } else { debug!( - "Drive burn block processing: no need (sortition tip: {}, heaviest: {}, {} ` so that data can be passed to `NakamotoNode` pub fn start( @@ -1063,7 +1064,7 @@ impl RunLoop { return None; } Err(e) => { - error!("Error initializing burnchain: {}", e); + error!("Error initializing burnchain: {e}"); info!("Exiting stacks-node"); return None; } @@ -1109,7 +1110,7 @@ impl RunLoop { // Make sure at least one 
sortition has happened, and make sure it's globally available let sortdb = burnchain.sortdb_mut(); let (rc_aligned_height, sn) = - RunLoop::get_reward_cycle_sortition_db_height(&sortdb, &burnchain_config); + RunLoop::get_reward_cycle_sortition_db_height(sortdb, &burnchain_config); let burnchain_tip_snapshot = if sn.block_height == burnchain_config.first_block_height { // need at least one sortition to happen. @@ -1137,7 +1138,7 @@ impl RunLoop { .tx_begin() .expect("FATAL: failed to begin burnchain DB tx"); for (reward_cycle, affirmation) in self.config.burnchain.affirmation_overrides.iter() { - tx.set_override_affirmation_map(*reward_cycle, affirmation.clone()).expect(&format!("FATAL: failed to set affirmation override ({affirmation}) for reward cycle {reward_cycle}")); + tx.set_override_affirmation_map(*reward_cycle, affirmation.clone()).unwrap_or_else(|_| panic!("FATAL: failed to set affirmation override ({affirmation}) for reward cycle {reward_cycle}")); } tx.commit() .expect("FATAL: failed to commit burnchain DB tx"); @@ -1168,10 +1169,7 @@ impl RunLoop { burnchain.get_headers_height() - 1, ); - debug!( - "Runloop: Begin main runloop starting a burnchain block {}", - sortition_db_height - ); + debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); let mut last_tenure_sortition_height = 0; @@ -1210,7 +1208,7 @@ impl RunLoop { ) { Ok(ibd) => ibd, Err(e) => { - debug!("Runloop: PoX sync wait routine aborted: {:?}", e); + debug!("Runloop: PoX sync wait routine aborted: {e:?}"); continue; } }; @@ -1231,11 +1229,10 @@ impl RunLoop { // runloop will cause the PoX sync watchdog to wait until it believes that the node has // obtained all the Stacks blocks it can. debug!( - "Runloop: Download burnchain blocks up to reward cycle #{} (height {})", + "Runloop: Download burnchain blocks up to reward cycle #{} (height {target_burnchain_block_height})", burnchain_config .block_height_to_reward_cycle(target_burnchain_block_height) - .expect("FATAL: target burnchain block height does not have a reward cycle"), - target_burnchain_block_height; + .expect("FATAL: target burnchain block height does not have a reward cycle"); "total_burn_sync_percent" => %percent, "local_burn_height" => burnchain_tip.block_snapshot.block_height, "remote_tip_height" => remote_chain_height @@ -1250,7 +1247,7 @@ impl RunLoop { match burnchain.sync(Some(target_burnchain_block_height)) { Ok(x) => x, Err(e) => { - warn!("Runloop: Burnchain controller stopped: {}", e); + warn!("Runloop: Burnchain controller stopped: {e}"); continue; } }; @@ -1264,15 +1261,13 @@ impl RunLoop { if next_sortition_height != last_tenure_sortition_height { info!( - "Runloop: Downloaded burnchain blocks up to height {}; target height is {}; remote_chain_height = {} next_sortition_height = {}, sortition_db_height = {}", - burnchain_height, target_burnchain_block_height, remote_chain_height, next_sortition_height, sortition_db_height + "Runloop: Downloaded burnchain blocks up to height {burnchain_height}; target height is {target_burnchain_block_height}; remote_chain_height = {remote_chain_height} next_sortition_height = {next_sortition_height}, sortition_db_height = {sortition_db_height}" ); } if next_sortition_height > sortition_db_height { debug!( - "Runloop: New burnchain block height {} > {}", - next_sortition_height, sortition_db_height + "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}" ); let mut sort_count = 0; @@ -1345,8 +1340,7 @@ impl RunLoop { num_sortitions_in_last_cycle = 
sort_count; debug!( - "Runloop: Synchronized sortitions up to block height {} from {} (chain tip height is {}); {} sortitions", - next_sortition_height, sortition_db_height, burnchain_height, num_sortitions_in_last_cycle; + "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height}); {num_sortitions_in_last_cycle} sortitions" ); sortition_db_height = next_sortition_height; @@ -1378,7 +1372,7 @@ impl RunLoop { remote_chain_height, ); - debug!("Runloop: Advance target burnchain block height from {} to {} (sortition height {})", target_burnchain_block_height, next_target_burnchain_block_height, sortition_db_height); + debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); target_burnchain_block_height = next_target_burnchain_block_height; if sortition_db_height >= burnchain_height && !ibd { @@ -1388,9 +1382,7 @@ impl RunLoop { .unwrap_or(0); if canonical_stacks_tip_height < mine_start { info!( - "Runloop: Synchronized full burnchain, but stacks tip height is {}, and we are trying to boot to {}, not mining until reaching chain tip", - canonical_stacks_tip_height, - mine_start + "Runloop: Synchronized full burnchain, but stacks tip height is {canonical_stacks_tip_height}, and we are trying to boot to {mine_start}, not mining until reaching chain tip" ); } else { // once we've synced to the chain tip once, don't apply this check again. @@ -1401,8 +1393,7 @@ impl RunLoop { // at tip, and not downloading. proceed to mine. if last_tenure_sortition_height != sortition_db_height { info!( - "Runloop: Synchronized full burnchain up to height {}. Proceeding to mine blocks", - sortition_db_height + "Runloop: Synchronized full burnchain up to height {sortition_db_height}. 
Proceeding to mine blocks"
                    );
                    last_tenure_sortition_height = sortition_db_height;
                }
diff --git a/testnet/stacks-node/src/stacks_events.rs b/testnet/stacks-node/src/stacks_events.rs
index f63b17a6ab..d7ec349466 100644
--- a/testnet/stacks-node/src/stacks_events.rs
+++ b/testnet/stacks-node/src/stacks_events.rs
@@ -22,10 +22,7 @@ fn main() {
 
     if help {
         println!("Usage: stacks-events [--addr=<addr>]");
-        println!(
-            "  --addr=<addr> Address to listen on (default: {})",
-            DEFAULT_ADDR
-        );
+        println!("  --addr=<addr> Address to listen on (default: {DEFAULT_ADDR})",);
         return;
     }
 
@@ -34,7 +31,7 @@ fn main() {
 
 fn serve_for_events(addr: &String) {
     let listener = TcpListener::bind(addr).unwrap();
-    eprintln!("Listening on {}", addr);
+    eprintln!("Listening on {addr}");
 
     for stream in listener.incoming() {
         let stream = stream.unwrap();
         handle_connection(stream);
     }
@@ -82,17 +79,16 @@ fn handle_connection(mut stream: TcpStream) {
         "path": path.unwrap(),
         "payload": payload_json,
     });
-    println!("{}", record);
+    println!("{record}");
 
     {
         let contents = "Thanks!";
         let response = format!(
-            "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{}",
-            contents.len(),
-            contents
+            "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{contents}",
+            contents.len()
         );
-        stream.write(response.as_bytes()).unwrap();
+        let _nmb_bytes = stream.write(response.as_bytes()).unwrap();
         stream.flush().unwrap();
     }
 }
diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs
index ff68126a83..395d829c8f 100644
--- a/testnet/stacks-node/src/syncctl.rs
+++ b/testnet/stacks-node/src/syncctl.rs
@@ -69,7 +69,7 @@ impl PoxSyncWatchdogComms {
             self.interruptable_sleep(1)?;
             std::hint::spin_loop();
         }
-        return Ok(true);
+        Ok(true)
     }
 
     fn interruptable_sleep(&self, secs: u64) -> Result<(), burnchain_error> {
@@ -95,7 +95,7 @@ impl PoxSyncWatchdogComms {
             self.interruptable_sleep(1)?;
             std::hint::spin_loop();
         }
-        return Ok(true);
+        Ok(true)
     }
 
     pub fn should_keep_running(&self) -> bool {
@@ -180,8 +180,7 @@ impl PoxSyncWatchdog {
             Ok(cs) => cs,
             Err(e) => {
                 return Err(format!(
-                    "Failed to open chainstate at '{}': {:?}",
-                    &chainstate_path, &e
+                    "Failed to open chainstate at '{chainstate_path}': {e:?}"
                 ));
             }
         };
@@ -192,7 +191,7 @@ impl PoxSyncWatchdog {
             new_processed_blocks: VecDeque::new(),
             last_attachable_query: 0,
             last_processed_query: 0,
-            max_samples: max_samples,
+            max_samples,
             max_staging: 10,
             watch_start_ts: 0,
             last_block_processed_ts: 0,
@@ -200,7 +199,7 @@ impl PoxSyncWatchdog {
             estimated_block_process_time: 5.0,
             steady_state_burnchain_sync_interval: burnchain_poll_time,
             steady_state_resync_ts: 0,
-            chainstate: chainstate,
+            chainstate,
             relayer_comms: watchdog_comms,
         })
     }
@@ -213,11 +212,11 @@ impl PoxSyncWatchdog {
     fn count_attachable_stacks_blocks(&mut self) -> Result<u64, String> {
         // number of staging blocks that have arrived since the last sortition
         let cnt = StacksChainState::count_attachable_staging_blocks(
-            &self.chainstate.db(),
+            self.chainstate.db(),
             self.max_staging,
             self.last_attachable_query,
         )
-        .map_err(|e| format!("Failed to count attachable staging blocks: {:?}", &e))?;
+        .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?;
 
         self.last_attachable_query = get_epoch_time_secs();
         Ok(cnt)
@@ -229,11 +228,11 @@ impl PoxSyncWatchdog {
     fn count_processed_stacks_blocks(&mut self) -> Result<u64, String> {
         // number of staging blocks that have arrived since the last sortition
         let cnt = StacksChainState::count_processed_staging_blocks(
-            &self.chainstate.db(),
+            self.chainstate.db(),
             self.max_staging,
             self.last_processed_query,
        )
-        .map_err(|e|
format!("Failed to count attachable staging blocks: {:?}", &e))?; + .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; self.last_processed_query = get_epoch_time_secs(); Ok(cnt) @@ -250,13 +249,13 @@ impl PoxSyncWatchdog { last_processed_height + (burnchain.stable_confirmations as u64) < burnchain_height; if ibd { debug!( - "PoX watchdog: {} + {} < {}, so initial block download", - last_processed_height, burnchain.stable_confirmations, burnchain_height + "PoX watchdog: {last_processed_height} + {} < {burnchain_height}, so initial block download", + burnchain.stable_confirmations ); } else { debug!( - "PoX watchdog: {} + {} >= {}, so steady-state", - last_processed_height, burnchain.stable_confirmations, burnchain_height + "PoX watchdog: {last_processed_height} + {} >= {burnchain_height}, so steady-state", + burnchain.stable_confirmations ); } ibd @@ -281,7 +280,7 @@ impl PoxSyncWatchdog { /// Is a derivative approximately flat, with a maximum absolute deviation from 0? /// Return whether or not the sample is mostly flat, and how many points were over the given /// error bar in either direction. - fn is_mostly_flat(deriv: &Vec, error: i64) -> (bool, usize) { + fn is_mostly_flat(deriv: &[i64], error: i64) -> (bool, usize) { let mut total_deviates = 0; let mut ret = true; for d in deriv.iter() { @@ -294,7 +293,7 @@ impl PoxSyncWatchdog { } /// low and high pass filter average -- take average without the smallest and largest values - fn hilo_filter_avg(samples: &Vec) -> f64 { + fn hilo_filter_avg(samples: &[i64]) -> f64 { // take average with low and high pass let mut min = i64::MAX; let mut max = i64::MIN; @@ -344,7 +343,7 @@ impl PoxSyncWatchdog { ) -> f64 { let this_reward_cycle = burnchain .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {}", tip_height)); + .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); let prev_reward_cycle = this_reward_cycle.saturating_sub(1); let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); @@ -358,7 +357,7 @@ impl PoxSyncWatchdog { } let block_wait_times = - StacksChainState::measure_block_wait_time(&chainstate.db(), start_height, end_height) + StacksChainState::measure_block_wait_time(chainstate.db(), start_height, end_height) .expect("BUG: failed to query chainstate block-processing times"); PoxSyncWatchdog::hilo_filter_avg(&block_wait_times) @@ -372,7 +371,7 @@ impl PoxSyncWatchdog { ) -> f64 { let this_reward_cycle = burnchain .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {}", tip_height)); + .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); let prev_reward_cycle = this_reward_cycle.saturating_sub(1); let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); @@ -386,7 +385,7 @@ impl PoxSyncWatchdog { } let block_download_times = StacksChainState::measure_block_download_time( - &chainstate.db(), + chainstate.db(), start_height, end_height, ) @@ -459,10 +458,7 @@ impl PoxSyncWatchdog { } if self.unconditionally_download { - debug!( - "PoX watchdog set to unconditionally download (ibd={})", - ibbd - ); + debug!("PoX watchdog set to unconditionally download (ibd={ibbd})"); self.relayer_comms.set_ibd(ibbd); return Ok(ibbd); } @@ -561,7 +557,7 @@ impl PoxSyncWatchdog { && get_epoch_time_secs() < expected_first_block_deadline { // still waiting for that first block in this reward cycle - debug!("PoX watchdog: Still warming up: waiting until {}s for first Stacks 
block download (estimated download time: {}s)...", expected_first_block_deadline, self.estimated_block_download_time); + debug!("PoX watchdog: Still warming up: waiting until {expected_first_block_deadline}s for first Stacks block download (estimated download time: {}s)...", self.estimated_block_download_time); sleep_ms(PER_SAMPLE_WAIT_MS); continue; } @@ -596,8 +592,8 @@ impl PoxSyncWatchdog { let (flat_processed, processed_deviants) = PoxSyncWatchdog::is_mostly_flat(&processed_delta, 0); - debug!("PoX watchdog: flat-attachable?: {}, flat-processed?: {}, estimated block-download time: {}s, estimated block-processing time: {}s", - flat_attachable, flat_processed, self.estimated_block_download_time, self.estimated_block_process_time); + debug!("PoX watchdog: flat-attachable?: {flat_attachable}, flat-processed?: {flat_processed}, estimated block-download time: {}s, estimated block-processing time: {}s", + self.estimated_block_download_time, self.estimated_block_process_time); if flat_attachable && flat_processed && self.last_block_processed_ts == 0 { // we're flat-lining -- this may be the end of this cycle @@ -607,8 +603,8 @@ impl PoxSyncWatchdog { if self.last_block_processed_ts > 0 && get_epoch_time_secs() < expected_last_block_deadline { - debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{})s before burnchain synchronization (estimated block-processing time: {}s)", - get_epoch_time_secs() + 1, expected_last_block_deadline, self.estimated_block_process_time); + debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{expected_last_block_deadline})s before burnchain synchronization (estimated block-processing time: {}s)", + get_epoch_time_secs() + 1, self.estimated_block_process_time); sleep_ms(PER_SAMPLE_WAIT_MS); continue; } @@ -617,8 +613,7 @@ impl PoxSyncWatchdog { // doing initial burnchain block download right now. 
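The watchdog decides whether to keep waiting by testing whether the attachable/processed staging-block deltas have flat-lined. Reassembled from the context lines in the syncctl.rs hunks above (the loop body is not shown in the diff, so this is a sketch of the check, not a verbatim copy):

    /// Is a derivative approximately flat, staying within `error` of 0?
    /// Returns whether the sample is mostly flat and how many points
    /// fall outside the error bar in either direction.
    fn is_mostly_flat(deriv: &[i64], error: i64) -> (bool, usize) {
        let mut total_deviates = 0;
        let mut ret = true;
        for d in deriv.iter() {
            if d.abs() > error {
                total_deviates += 1;
                ret = false;
            }
        }
        (ret, total_deviates)
    }

    fn main() {
        // Borrowing `&[i64]` instead of `&Vec<i64>` (the clippy fix above)
        // lets callers pass arrays and slices as well as Vecs.
        assert_eq!(is_mostly_flat(&[0, 1, -1], 1), (true, 0));
        assert_eq!(is_mostly_flat(&[0, 5, -7], 1), (false, 2));
    }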
// only proceed to fetch the next reward cycle's burnchain blocks if we're neither downloading nor // attaching blocks recently - debug!("PoX watchdog: In initial burnchain block download: flat-attachable = {}, flat-processed = {}, min-attachable: {}, min-processed: {}", - flat_attachable, flat_processed, &attachable_deviants, &processed_deviants); + debug!("PoX watchdog: In initial burnchain block download: flat-attachable = {flat_attachable}, flat-processed = {flat_processed}, min-attachable: {attachable_deviants}, min-processed: {processed_deviants}"); if !flat_attachable || !flat_processed { sleep_ms(PER_SAMPLE_WAIT_MS); @@ -645,7 +640,7 @@ impl PoxSyncWatchdog { } (err_attach, err_processed) => { // can only happen on DB query failure - error!("PoX watchdog: Failed to count recently attached ('{:?}') and/or processed ('{:?}') staging blocks", &err_attach, &err_processed); + error!("PoX watchdog: Failed to count recently attached ('{err_attach:?}') and/or processed ('{err_processed:?}') staging blocks"); panic!(); } }; diff --git a/testnet/stacks-node/src/tenure.rs b/testnet/stacks-node/src/tenure.rs index 5dd67cddab..7322133889 100644 --- a/testnet/stacks-node/src/tenure.rs +++ b/testnet/stacks-node/src/tenure.rs @@ -41,7 +41,8 @@ pub struct Tenure { parent_block_total_burn: u64, } -impl<'a> Tenure { +impl Tenure { + #[allow(clippy::too_many_arguments)] pub fn new( parent_block: ChainTip, coinbase_tx: StacksTransaction, @@ -82,7 +83,7 @@ impl<'a> Tenure { elapsed = Instant::now().duration_since(self.burnchain_tip.received_at); } - let (mut chain_state, _) = StacksChainState::open( + let (chain_state, _) = StacksChainState::open( self.config.is_mainnet(), self.config.burnchain.chain_id, &self.config.get_chainstate_path_str(), @@ -91,13 +92,13 @@ impl<'a> Tenure { .unwrap(); let (anchored_block, _, _) = StacksBlockBuilder::build_anchored_block( - &mut chain_state, + &chain_state, burn_dbconn, &mut self.mem_pool, &self.parent_block.metadata, self.parent_block_total_burn, self.vrf_proof.clone(), - self.microblock_pubkeyhash.clone(), + self.microblock_pubkeyhash, &self.coinbase_tx, BlockBuilderSettings::limited(), None, diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 621f92aa47..3e69ac18cc 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -17,21 +17,14 @@ use crate::helium::RunLoop; use crate::tests::to_addr; use crate::Config; -#[derive(Debug)] +#[derive(Debug, thiserror::Error)] pub enum BitcoinCoreError { + #[error("bitcoind spawn failed: {0}")] SpawnFailed(String), + #[error("bitcoind stop failed: {0}")] StopFailed(String), } -impl std::fmt::Display for BitcoinCoreError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::SpawnFailed(msg) => write!(f, "bitcoind spawn failed: {msg}"), - Self::StopFailed(msg) => write!(f, "bitcoind stop failed: {msg}"), - } - } -} - type BitcoinResult = Result; pub struct BitcoinCoreController { @@ -50,21 +43,18 @@ impl BitcoinCoreController { fn add_rpc_cli_args(&self, command: &mut Command) { command.arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); - match ( + if let (Some(username), Some(password)) = ( &self.config.burnchain.username, &self.config.burnchain.password, ) { - (Some(username), Some(password)) => { - command - .arg(format!("-rpcuser={username}")) - .arg(format!("-rpcpassword={password}")); - } - _ => {} + command + 
.arg(format!("-rpcuser={username}")) + .arg(format!("-rpcpassword={password}")); } } pub fn start_bitcoind(&mut self) -> BitcoinResult<()> { - std::fs::create_dir_all(&self.config.get_burnchain_path_str()).unwrap(); + std::fs::create_dir_all(self.config.get_burnchain_path_str()).unwrap(); let mut command = Command::new("bitcoind"); command @@ -111,7 +101,7 @@ impl BitcoinCoreController { } pub fn stop_bitcoind(&mut self) -> Result<(), BitcoinCoreError> { - if let Some(_) = self.bitcoind_process.take() { + if self.bitcoind_process.take().is_some() { let payload = BitcoinRPCRequest { method: "stop".to_string(), params: vec![], @@ -128,8 +118,7 @@ impl BitcoinCoreController { } } else { return Err(BitcoinCoreError::StopFailed(format!( - "Invalid response: {:?}", - res + "Invalid response: {res:?}" ))); } } @@ -224,11 +213,11 @@ fn bitcoind_integration(segwit_flag: bool) { .callbacks .on_new_burn_chain_state(|round, burnchain_tip, chain_tip| { let block = &burnchain_tip.block_snapshot; - let expected_total_burn = BITCOIND_INT_TEST_COMMITS * (round as u64 + 1); + let expected_total_burn = BITCOIND_INT_TEST_COMMITS * (round + 1); assert_eq!(block.total_burn, expected_total_burn); - assert_eq!(block.sortition, true); - assert_eq!(block.num_sortitions, round as u64 + 1); - assert_eq!(block.block_height, round as u64 + 2003); + assert!(block.sortition); + assert_eq!(block.num_sortitions, round + 1); + assert_eq!(block.block_height, round + 2003); let leader_key = "f888e0cab5c16de8edf72b544a189ece5c0b95cd9178606c970789ac71d17bb4"; match round { @@ -253,7 +242,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert!(op.parent_vtxindex == 0); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } } @@ -277,7 +266,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2003); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -306,7 +295,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2004); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -335,7 +324,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2005); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -364,7 +353,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2006); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -393,7 +382,7 @@ fn bitcoind_integration(segwit_flag: bool) { assert_eq!(op.parent_block_ptr, 2007); assert_eq!(op.burn_fee, BITCOIND_INT_TEST_COMMITS); } - _ => assert!(false), + _ => panic!("Unexpected operation"), } } @@ -471,7 +460,6 @@ fn bitcoind_integration(segwit_flag: bool) { }, _ => {} }; - return }); // Use block's hook for asserting expectations diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 076a5f61f3..b305a7429a 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -13,15 +13,12 @@ use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{ StacksBlockHeader, StacksPrivateKey, StacksTransaction, TransactionPayload, }; -use stacks::core; use stacks::core::{ - StacksEpoch, StacksEpochId, 
PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, + self, EpochList, StacksEpoch, StacksEpochId, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, }; use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, StacksAddress, VRFSeed, -}; +use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, VRFSeed}; use stacks_common::util::hash::hex_bytes; use stacks_common::util::sleep_ms; @@ -50,15 +47,15 @@ fn test_exact_block_costs() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let epoch_205_transition_height = 210; let transactions_to_broadcast = 25; let (mut conf, _miner_account) = neon_integration_test_conf(); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_205_transition_height; - epochs[2].start_height = epoch_205_transition_height; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_205_transition_height; + epochs[StacksEpochId::Epoch2_05].start_height = epoch_205_transition_height; conf.burnchain.epochs = Some(epochs); conf.node.mine_microblocks = true; @@ -256,10 +253,8 @@ fn test_exact_block_costs() { if dbget_txs.len() >= 2 { processed_txs_before_205 = true; } - } else { - if dbget_txs.len() >= 2 { - processed_txs_after_205 = true; - } + } else if dbget_txs.len() >= 2 { + processed_txs_after_205 = true; } assert_eq!(mined_anchor_cost, anchor_cost as u64); @@ -287,7 +282,7 @@ fn test_dynamic_db_method_costs() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let contract_name = "test-contract"; let epoch_205_transition_height = 210; @@ -304,9 +299,9 @@ fn test_dynamic_db_method_costs() { "; let (mut conf, _miner_account) = neon_integration_test_conf(); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_205_transition_height; - epochs[2].start_height = epoch_205_transition_height; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_205_transition_height; + epochs[StacksEpochId::Epoch2_05].start_height = epoch_205_transition_height; conf.burnchain.epochs = Some(epochs); @@ -455,8 +450,7 @@ fn test_dynamic_db_method_costs() { .as_i64() .unwrap(); eprintln!( - "Burn height = {}, runtime_cost = {}, function_name = {}", - burn_height, runtime_cost, function_name + "Burn height = {burn_height}, runtime_cost = {runtime_cost}, function_name = {function_name}" ); if function_name == "db-get1" { @@ -507,9 +501,9 @@ fn transition_empty_blocks() { let (mut conf, miner_account) = neon_integration_test_conf(); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_2_05; - epochs[2].start_height = epoch_2_05; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; conf.burnchain.epochs = Some(epochs); @@ -569,21 +563,20 @@ fn transition_empty_blocks() { ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), 
&tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip ), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, - tip_info.burn_block_height, - res + tip_info.burn_block_height ); if tip_info.burn_block_height == epoch_2_05 { @@ -716,7 +709,7 @@ fn test_cost_limit_switch_version205() { let (mut conf, _) = neon_integration_test_conf(); // Create a schedule where we lower the read_count on Epoch2_05. - conf.burnchain.epochs = Some(vec![ + conf.burnchain.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -769,7 +762,7 @@ fn test_cost_limit_switch_version205() { }, network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]); + ])); conf.burnchain.pox_2_activation = Some(10_003); conf.initial_balances.push(InitialBalance { @@ -831,7 +824,7 @@ fn test_cost_limit_switch_version205() { &test_observer::get_blocks(), |transaction| match &transaction.payload { TransactionPayload::SmartContract(contract, ..) => { - contract.name == ContractName::try_from("increment-contract").unwrap() + contract.name == ContractName::from("increment-contract") } _ => false, }, @@ -847,7 +840,7 @@ fn test_cost_limit_switch_version205() { 0, 1000, conf.burnchain.chain_id, - &creator_addr.into(), + &creator_addr, "increment-contract", "increment-many", &[], @@ -863,7 +856,7 @@ fn test_cost_limit_switch_version205() { &test_observer::get_blocks(), |transaction| match &transaction.payload { TransactionPayload::ContractCall(contract) => { - contract.contract_name == ContractName::try_from("increment-contract").unwrap() + contract.contract_name == ContractName::from("increment-contract") } _ => false, }, @@ -882,7 +875,7 @@ fn test_cost_limit_switch_version205() { 0, 1000, conf.burnchain.chain_id, - &creator_addr.into(), + &creator_addr, "increment-contract", "increment-many", &[], @@ -897,7 +890,7 @@ fn test_cost_limit_switch_version205() { &test_observer::get_blocks(), |transaction| match &transaction.payload { TransactionPayload::ContractCall(contract) => { - contract.contract_name == ContractName::try_from("increment-contract").unwrap() + contract.contract_name == ContractName::from("increment-contract") } _ => false, }, @@ -916,10 +909,7 @@ fn bigger_microblock_streams_in_2_05() { return; } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -940,7 +930,7 @@ fn bigger_microblock_streams_in_2_05() { conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - conf.burnchain.epochs = Some(vec![ + conf.burnchain.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -980,7 +970,7 @@ fn bigger_microblock_streams_in_2_05() { }, network_epoch: PEER_VERSION_EPOCH_2_05, }, - ]); + ])); conf.burnchain.pox_2_activation = Some(10_003); let txs: Vec> = spender_sks @@ -993,7 +983,7 @@ fn bigger_microblock_streams_in_2_05() { 0, 1049230, conf.burnchain.chain_id, - &format!("large-{}", ix), + &format!("large-{ix}"), &format!(" ;; a single one of these transactions consumes over half the runtime budget 
(define-constant BUFF_TO_BYTE (list @@ -1035,9 +1025,8 @@ fn bigger_microblock_streams_in_2_05() { ) ) (begin - (crash-me \"{}\")) - ", - &format!("large-contract-{}", &ix) + (crash-me \"large-contract-{ix}\")) + " ) ) }) @@ -1176,9 +1165,9 @@ fn bigger_microblock_streams_in_2_05() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..) = parsed.payload { - if tsc.name.to_string().find("costs-2").is_some() { + if tsc.name.to_string().contains("costs-2") { in_205 = true; - } else if tsc.name.to_string().find("large").is_some() { + } else if tsc.name.to_string().contains("large") { num_big_microblock_txs += 1; if in_205 { total_big_txs_per_microblock_205 += 1; @@ -1209,7 +1198,7 @@ fn bigger_microblock_streams_in_2_05() { max_big_txs_per_microblock_20 = num_big_microblock_txs; } - eprintln!("Epoch size: {:?}", &total_execution_cost); + eprintln!("Epoch size: {total_execution_cost:?}"); if !in_205 && total_execution_cost.exceeds(&epoch_20_stream_cost) { epoch_20_stream_cost = total_execution_cost; @@ -1232,21 +1221,13 @@ fn bigger_microblock_streams_in_2_05() { } eprintln!( - "max_big_txs_per_microblock_20: {}, total_big_txs_per_microblock_20: {}", - max_big_txs_per_microblock_20, total_big_txs_per_microblock_20 - ); - eprintln!( - "max_big_txs_per_microblock_205: {}, total_big_txs_per_microblock_205: {}", - max_big_txs_per_microblock_205, total_big_txs_per_microblock_205 - ); - eprintln!( - "confirmed stream execution in 2.0: {:?}", - &epoch_20_stream_cost + "max_big_txs_per_microblock_20: {max_big_txs_per_microblock_20}, total_big_txs_per_microblock_20: {total_big_txs_per_microblock_20}" ); eprintln!( - "confirmed stream execution in 2.05: {:?}", - &epoch_205_stream_cost + "max_big_txs_per_microblock_205: {max_big_txs_per_microblock_205}, total_big_txs_per_microblock_205: {total_big_txs_per_microblock_205}" ); + eprintln!("confirmed stream execution in 2.0: {epoch_20_stream_cost:?}"); + eprintln!("confirmed stream execution in 2.05: {epoch_205_stream_cost:?}"); // stuff happened assert!(epoch_20_stream_cost.runtime > 0); diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 8f6c466318..55d3ee0b7b 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -1,6 +1,7 @@ use std::collections::{HashMap, HashSet}; use std::{env, thread}; +use ::core::str; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::ClarityVersion; use stacks::burnchains::bitcoin::address::{ @@ -23,8 +24,7 @@ use stacks::chainstate::stacks::miner::{ }; use stacks::chainstate::stacks::StacksBlockHeader; use stacks::clarity_cli::vm_execute as execute; -use stacks::core; -use stacks::core::BURNCHAIN_TX_SEARCH_WINDOW; +use stacks::core::{self, EpochList, BURNCHAIN_TX_SEARCH_WINDOW}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, VRFSeed, @@ -46,7 +46,7 @@ use crate::tests::neon_integrations::*; use crate::tests::*; use crate::{neon, BitcoinRegtestController, BurnchainController, Keychain}; -const MINER_BURN_PUBLIC_KEY: &'static str = +const MINER_BURN_PUBLIC_KEY: &str = "03dc62fe0b8964d01fc9ca9a5eec0e22e557a12cc656919e648f04e0b26fea5faa"; fn advance_to_2_1( @@ -73,11 +73,11 @@ fn advance_to_2_1( conf.miner.block_reward_recipient = block_reward_recipient; 
test_observer::register_any(&mut conf); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_2_05; - epochs[2].start_height = epoch_2_05; - epochs[2].end_height = epoch_2_1; - epochs[3].start_height = epoch_2_1; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].end_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].start_height = epoch_2_1; conf.burnchain.epochs = Some(epochs); @@ -127,7 +127,7 @@ fn advance_to_2_1( btc_regtest_controller.bootstrap_chain(1); let mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); - debug!("Mining pubkey is {}", &mining_pubkey); + debug!("Mining pubkey is {mining_pubkey}"); btc_regtest_controller.set_mining_pubkey(MINER_BURN_PUBLIC_KEY.to_string()); mining_pubkey @@ -135,7 +135,7 @@ fn advance_to_2_1( btc_regtest_controller.bootstrap_chain(1); let mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); - debug!("Mining pubkey is {}", &mining_pubkey); + debug!("Mining pubkey is {mining_pubkey}"); btc_regtest_controller.set_mining_pubkey(MINER_BURN_PUBLIC_KEY.to_string()); btc_regtest_controller.bootstrap_chain(1); @@ -153,8 +153,8 @@ fn advance_to_2_1( .get_all_utxos(&Secp256k1PublicKey::from_hex(&mining_pubkey).unwrap()); eprintln!( - "UTXOs for {} (segwit={}): {:?}", - &mining_pubkey, conf.miner.segwit, &utxos + "UTXOs for {mining_pubkey} (segwit={}): {utxos:?}", + conf.miner.segwit ); assert_eq!(utxos.len(), 1); @@ -197,8 +197,8 @@ fn advance_to_2_1( let pox_info = get_pox_info(&http_origin).unwrap(); eprintln!( - "\nPoX info at {}\n{:?}\n\n", - tip_info.burn_block_height, &pox_info + "\nPoX info at {}\n{pox_info:?}\n\n", + tip_info.burn_block_height ); // this block is the epoch transition? 
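The `epochs[1]`/`epochs[2]` rewrites above are the most consequential change in these test files: `conf.burnchain.epochs` is now an `EpochList` (from `stacks::core`) indexed by `StacksEpochId` rather than a bare `Vec<StacksEpoch>` indexed by position, so inserting a new epoch into the regtest schedule can no longer silently shift every hard-coded offset. The sketch below is a self-contained analogue of that pattern, not the real API; the `EpochId`/`Epochs` names are stand-ins:

```rust
use std::ops::{Index, IndexMut};

#[allow(non_camel_case_types)]
#[derive(Clone, Copy, PartialEq)]
enum EpochId {
    Epoch20,
    Epoch2_05,
    Epoch21,
}

struct Epoch {
    id: EpochId,
    start_height: u64,
    end_height: u64,
}

// Stand-in for stacks::core::EpochList: lookups go by epoch id,
// not by position in the backing Vec.
struct Epochs(Vec<Epoch>);

impl Index<EpochId> for Epochs {
    type Output = Epoch;
    fn index(&self, id: EpochId) -> &Epoch {
        self.0.iter().find(|e| e.id == id).expect("unknown epoch")
    }
}

impl IndexMut<EpochId> for Epochs {
    fn index_mut(&mut self, id: EpochId) -> &mut Epoch {
        self.0.iter_mut().find(|e| e.id == id).expect("unknown epoch")
    }
}

fn main() {
    let mut epochs = Epochs(vec![
        Epoch { id: EpochId::Epoch20, start_height: 0, end_height: 0 },
        Epoch { id: EpochId::Epoch2_05, start_height: 0, end_height: 0 },
        Epoch { id: EpochId::Epoch21, start_height: 0, end_height: u64::MAX },
    ]);

    // Mirrors advance_to_2_1: boundaries are named by id, not position.
    let (epoch_2_05, epoch_2_1) = (210, 215);
    epochs[EpochId::Epoch20].end_height = epoch_2_05;
    epochs[EpochId::Epoch2_05].start_height = epoch_2_05;
    epochs[EpochId::Epoch2_05].end_height = epoch_2_1;
    epochs[EpochId::Epoch21].start_height = epoch_2_1;

    assert_eq!(epochs[EpochId::Epoch21].start_height, 215);
    assert_eq!(
        epochs[EpochId::Epoch2_05].end_height,
        epochs[EpochId::Epoch21].start_height
    );
}
```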
@@ -210,13 +210,13 @@ fn advance_to_2_1( ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip @@ -224,7 +224,6 @@ fn advance_to_2_1( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, tip_info.burn_block_height, - res ); if tip_info.burn_block_height >= epoch_2_1 { @@ -251,7 +250,7 @@ fn advance_to_2_1( true, ) .unwrap_err(); - eprintln!("No pox-2: {}", &e); + eprintln!("No pox-2: {e}"); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -264,13 +263,13 @@ fn advance_to_2_1( assert_eq!(account.nonce, 9); eprintln!("Begin Stacks 2.1"); - return ( + ( conf, btcd_controller, btc_regtest_controller, blocks_processed, channel, - ); + ) } #[test] @@ -285,7 +284,7 @@ fn transition_adds_burn_block_height() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let (conf, _btcd_controller, mut btc_regtest_controller, blocks_processed, coord_channel) = advance_to_2_1( @@ -409,11 +408,10 @@ fn transition_adds_burn_block_height() { for event in events.iter() { if let Some(cev) = event.get("contract_event") { // strip leading `0x` - eprintln!("{:#?}", &cev); + eprintln!("{cev:#?}"); let clarity_serialized_value = hex_bytes( - &String::from_utf8( - cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..] - .to_vec(), + str::from_utf8( + &cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..], ) .unwrap(), ) @@ -544,7 +542,7 @@ fn transition_fixes_bitcoin_rigidity() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let _spender_btc_addr = BitcoinAddress::from_bytes_legacy( BitcoinNetworkType::Regtest, LegacyBitcoinAddressType::PublicKeyHash, @@ -554,7 +552,7 @@ fn transition_fixes_bitcoin_rigidity() { let spender_2_sk = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_2_stx_addr: StacksAddress = to_addr(&spender_2_sk); - let spender_2_addr: PrincipalData = spender_2_stx_addr.clone().into(); + let spender_2_addr: PrincipalData = spender_2_stx_addr.into(); let epoch_2_05 = 210; let epoch_2_1 = 215; @@ -576,11 +574,11 @@ fn transition_fixes_bitcoin_rigidity() { conf.initial_balances.append(&mut initial_balances); test_observer::register_any(&mut conf); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_2_05; - epochs[2].start_height = epoch_2_05; - epochs[2].end_height = epoch_2_1; - epochs[3].start_height = epoch_2_1; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].end_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].start_height = epoch_2_1; conf.burnchain.epochs = Some(epochs); @@ -655,7 +653,7 @@ fn transition_fixes_bitcoin_rigidity() { // okay, let's send a pre-stx op for a transfer-stx op that will get mined before the 2.1 epoch let pre_stx_op 
= PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -687,8 +685,8 @@ fn transition_fixes_bitcoin_rigidity() { let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -698,7 +696,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller @@ -728,21 +726,20 @@ fn transition_fixes_bitcoin_rigidity() { ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip ), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, - tip_info.burn_block_height, - res + tip_info.burn_block_height ); if tip_info.burn_block_height >= epoch_2_1 { @@ -778,7 +775,7 @@ fn transition_fixes_bitcoin_rigidity() { true, ) .unwrap_err(); - eprintln!("No pox-2: {}", &e); + eprintln!("No pox-2: {e}"); // costs-3 should NOT be initialized let e = get_contract_src( @@ -788,7 +785,7 @@ fn transition_fixes_bitcoin_rigidity() { true, ) .unwrap_err(); - eprintln!("No costs-3: {}", &e); + eprintln!("No costs-3: {e}"); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -812,7 +809,7 @@ fn transition_fixes_bitcoin_rigidity() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -840,8 +837,8 @@ fn transition_fixes_bitcoin_rigidity() { let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -851,7 +848,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller @@ -885,7 +882,7 @@ fn transition_fixes_bitcoin_rigidity() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_2_stx_addr.clone(), + output: spender_2_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -914,8 +911,8 @@ fn transition_fixes_bitcoin_rigidity() { // let's fire off our transfer op. 
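On the `raw_value` hunks above (and the matching `use ::core::str;` import added at the top of epoch_21.rs, spelled with a leading `::` presumably because this file already binds `core` to `stacks::core`): decoding an observer event no longer copies the byte tail into an owned `String` just to strip the `0x` prefix. `str::from_utf8` validates the borrowed slice in place and yields a `&str` that feeds `hex_bytes` directly. A std-only sketch of the difference:

```rust
use std::str;

fn main() {
    // Stand-in for an event's raw_value: hex with a `0x` prefix.
    let raw_value = "0x0c00000001";

    // Old shape: copy the tail into a Vec, then build an owned String.
    let owned = String::from_utf8(raw_value.as_bytes()[2..].to_vec()).unwrap();

    // New shape: validate the borrowed bytes in place; no allocation.
    let borrowed: &str = str::from_utf8(&raw_value.as_bytes()[2..]).unwrap();

    assert_eq!(owned, borrowed);
}
```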
let transfer_stx_op = TransferStxOp { - sender: spender_2_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_2_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -925,7 +922,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_2_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_2_sk, false); btc_regtest_controller .submit_manual( @@ -952,7 +949,7 @@ fn transition_fixes_bitcoin_rigidity() { // let's fire off another transfer op that will fall outside the window let pre_stx_op = PreStxOp { - output: spender_2_stx_addr.clone(), + output: spender_2_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -980,8 +977,8 @@ fn transition_fixes_bitcoin_rigidity() { }; let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 123, memo: vec![], // to be filled in @@ -991,7 +988,7 @@ fn transition_fixes_bitcoin_rigidity() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_2_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_2_sk, false); btc_regtest_controller .submit_manual( @@ -1070,11 +1067,7 @@ fn transition_adds_get_pox_addr_recipients() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (conf, _btcd_controller, mut btc_regtest_controller, blocks_processed, coord_channel) = advance_to_2_1(initial_balances, None, Some(pox_constants.clone()), false); @@ -1094,11 +1087,10 @@ fn transition_adds_get_pox_addr_recipients() { .iter() .enumerate() { - let spender_sk = spender_sks[i].clone(); + let spender_sk = spender_sks[i]; let pox_addr_tuple = execute( &format!( - "{{ hashbytes: 0x{}, version: 0x{:02x} }}", - pox_pubkey_hash, + "{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x{:02x} }}", &(*addr_variant as u8) ), ClarityVersion::Clarity2, @@ -1126,9 +1118,8 @@ fn transition_adds_get_pox_addr_recipients() { } // stack some STX to segwit addressses - for i in 4..7 { - let spender_sk = spender_sks[i].clone(); - let pubk = Secp256k1PublicKey::from_private(&spender_sk); + for (i, spender_sk) in spender_sks.iter().enumerate().take(7).skip(4) { + let pubk = Secp256k1PublicKey::from_private(spender_sk); let version = i as u8; let bytes = match i { 4 => { @@ -1141,13 +1132,13 @@ fn transition_adds_get_pox_addr_recipients() { } }; let pox_addr_tuple = execute( - &format!("{{ hashbytes: 0x{}, version: 0x{:02x} }}", &bytes, &version), + &format!("{{ hashbytes: 0x{bytes}, version: 0x{version:02x} }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let tx = make_contract_call( - &spender_sk, + spender_sk, 0, 300, conf.burnchain.chain_id, @@ -1183,7 +1174,7 @@ fn transition_adds_get_pox_addr_recipients() { ) "; - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sks[0])); + let spender_addr_c32 = to_addr(&spender_sks[0]); let contract_tx = make_contract_publish( &spender_sks[0], 1, @@ -1197,17 +1188,15 @@ fn transition_adds_get_pox_addr_recipients() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); 
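The clone removals around the burnchain ops compile only because types like `StacksAddress`, `Txid`, and `StacksPrivateKey` are evidently `Copy`, so `spender_stx_addr.clone()` duplicated a value that is copied implicitly anyway (clippy's `clone_on_copy`). Nearby, `for i in 4..7` over `spender_sks` becomes `.iter().enumerate().take(7).skip(4)`; note the adapter order: `take(7)` caps the stream at the first seven items and `skip(4)` then drops four, visiting exactly indices 4, 5, and 6. The `bytes_to_hex` calls also lose a `.to_vec()`, since a `&[u8; 20]` coerces to `&[u8]` directly; and just below, `.expect(&format!(...))` becomes `.unwrap_or_else(|| panic!(...))` so the panic message is only formatted on the failure path (clippy's `expect_fun_call`). A sketch of the Copy point, using a hypothetical stand-in type:

```rust
// Hypothetical stand-in: the real StacksAddress is assumed to be Copy,
// which is what lets the patch drop the explicit `.clone()` calls.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Addr([u8; 20]);

struct TransferOp {
    sender: Addr,
    recipient: Addr,
}

fn main() {
    let sender = Addr([1; 20]);
    let recipient = Addr([2; 20]);

    // A Copy value is duplicated implicitly on use; `.clone()` would do
    // the same work, just more verbosely.
    let op = TransferOp { sender, recipient };

    // The original bindings remain usable after the move-that-copies.
    assert_eq!(op.sender, sender);
    assert_eq!(op.recipient, recipient);

    // Option<T> is Copy whenever T is, which is why `pox_out_opt.clone()`
    // also disappears later in the patch.
    let pox_out_opt: Option<u128> = Some(42);
    let copied = pox_out_opt;
    assert_eq!(copied, pox_out_opt);
}
```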
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); test_observer::clear(); // mine through two reward cycles // now let's mine until the next reward cycle starts ... - while sort_height - < (stack_sort_height as u64) + (((2 * pox_constants.reward_cycle_length) + 1) as u64) - { + while sort_height < stack_sort_height + (((2 * pox_constants.reward_cycle_length) + 1) as u64) { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = coord_channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let cc_tx = make_contract_call( @@ -1244,13 +1233,12 @@ fn transition_adds_get_pox_addr_recipients() { let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if parsed.txid() == cc_txid { // check events for this block - for (_i, event) in events.iter().enumerate() { + for event in events.iter() { if let Some(cev) = event.get("contract_event") { // strip leading `0x` let clarity_serialized_value = hex_bytes( - &String::from_utf8( - cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..] - .to_vec(), + str::from_utf8( + &cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..], ) .unwrap(), ) @@ -1293,16 +1281,16 @@ fn transition_adds_get_pox_addr_recipients() { .unwrap(); // NOTE: there's an even number of payouts here, so this works - eprintln!("payout at {} = {}", burn_block_height, &payout); + eprintln!("payout at {burn_block_height} = {payout}"); if pox_constants.is_in_prepare_phase(0, burn_block_height) { // in prepare phase - eprintln!("{} in prepare phase", burn_block_height); + eprintln!("{burn_block_height} in prepare phase"); assert_eq!(payout, conf.burnchain.burn_fee_cap as u128); assert_eq!(pox_addr_tuples.len(), 1); } else { // in reward phase - eprintln!("{} in reward phase", burn_block_height); + eprintln!("{burn_block_height} in reward phase"); assert_eq!( payout, (conf.burnchain.burn_fee_cap / (OUTPUTS_PER_COMMIT as u64)) @@ -1313,10 +1301,11 @@ fn transition_adds_get_pox_addr_recipients() { for pox_addr_value in pox_addr_tuples.into_iter() { let pox_addr = - PoxAddress::try_from_pox_tuple(false, &pox_addr_value).expect( - &format!("FATAL: invalid PoX tuple {:?}", &pox_addr_value), - ); - eprintln!("at {}: {:?}", burn_block_height, &pox_addr); + PoxAddress::try_from_pox_tuple(false, &pox_addr_value) + .unwrap_or_else(|| { + panic!("FATAL: invalid PoX tuple {pox_addr_value:?}") + }); + eprintln!("at {burn_block_height}: {pox_addr:?}"); if !pox_addr.is_burn() { found_pox_addrs.insert(pox_addr); } @@ -1328,14 +1317,14 @@ fn transition_adds_get_pox_addr_recipients() { } } - eprintln!("found pox addrs: {:?}", &found_pox_addrs); + eprintln!("found pox addrs: {found_pox_addrs:?}"); assert_eq!(found_pox_addrs.len(), 7); for addr in found_pox_addrs .into_iter() .map(|addr| Value::Tuple(addr.as_clarity_tuple().unwrap())) { - eprintln!("Contains: {:?}", &addr); + eprintln!("Contains: {addr:?}"); assert!(expected_pox_addrs.contains(&addr.to_string())); } } @@ -1388,7 +1377,7 @@ fn transition_adds_mining_from_segwit() { let utxos = btc_regtest_controller .get_all_utxos(&Secp256k1PublicKey::from_hex(MINER_BURN_PUBLIC_KEY).unwrap()); - assert!(utxos.len() > 0); + assert!(!utxos.is_empty()); // all UTXOs should be segwit for utxo in utxos.iter() { @@ -1398,7 +1387,7 @@ fn transition_adds_mining_from_segwit() { ); if let 
Some(BitcoinAddress::Segwit(SegwitBitcoinAddress::P2WPKH(..))) = &utxo_addr { } else { - panic!("UTXO address was {:?}", &utxo_addr); + panic!("UTXO address was {utxo_addr:?}"); } } @@ -1428,10 +1417,10 @@ fn transition_adds_mining_from_segwit() { SortitionDB::get_block_commits_by_block(sortdb.conn(), &tip.sortition_id).unwrap(); assert_eq!(commits.len(), 1); - let txid = commits[0].txid.clone(); + let txid = commits[0].txid; let tx = btc_regtest_controller.get_raw_transaction(&txid); - eprintln!("tx = {:?}", &tx); + eprintln!("tx = {tx:?}"); assert_eq!(tx.input[0].witness.len(), 2); let addr = BitcoinAddress::try_from_segwit( false, @@ -1462,11 +1451,7 @@ fn transition_removes_pox_sunset() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -1496,11 +1481,11 @@ fn transition_removes_pox_sunset() { let epoch_21 = epoch_21_rc * reward_cycle_len + 1; - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = 1; - epochs[2].start_height = 1; - epochs[2].end_height = epoch_21; - epochs[3].start_height = epoch_21; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = 1; + epochs[StacksEpochId::Epoch2_05].start_height = 1; + epochs[StacksEpochId::Epoch2_05].end_height = epoch_21; + epochs[StacksEpochId::Epoch21].start_height = epoch_21; conf.burnchain.epochs = Some(epochs); @@ -1518,8 +1503,8 @@ fn transition_removes_pox_sunset() { 4 * prepare_phase_len / 5, 5, 15, - (sunset_start_rc * reward_cycle_len - 1).into(), - (sunset_end_rc * reward_cycle_len).into(), + sunset_start_rc * reward_cycle_len - 1, + sunset_end_rc * reward_cycle_len, (epoch_21 as u32) + 1, u32::MAX, u32::MAX, @@ -1573,11 +1558,8 @@ fn transition_removes_pox_sunset() { let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); + assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 0); let tx = make_contract_call( @@ -1591,7 +1573,7 @@ fn transition_removes_pox_sunset() { &[ Value::UInt(first_bal as u128 - 260 * 3), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -1605,29 +1587,26 @@ fn transition_removes_pox_sunset() { submit_tx(&http_origin, &tx); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-1: {}", sort_height); + eprintln!("Sort height pox-1: {sort_height}"); // advance to next reward cycle for _i in 0..(reward_cycle_len * 2 + 2) { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-1: {} <= {}", sort_height, epoch_21); + eprintln!("Sort height pox-1: {sort_height} <= {epoch_21}"); } // pox must activate let pox_info = get_pox_info(&http_origin).unwrap(); - eprintln!("pox_info in pox-1 = {:?}", &pox_info); - assert_eq!(pox_info.current_cycle.is_pox_active, true); - assert_eq!( - &pox_info.contract_id, - 
&format!("ST000000000000000000002AMW42H.pox") - ); + eprintln!("pox_info in pox-1 = {pox_info:?}"); + assert!(pox_info.current_cycle.is_pox_active); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); // advance to 2.1 while sort_height <= epoch_21 + 1 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-1: {} <= {}", sort_height, epoch_21); + eprintln!("Sort height pox-1: {sort_height} <= {epoch_21}"); } let pox_info = get_pox_info(&http_origin).unwrap(); @@ -1635,12 +1614,9 @@ fn transition_removes_pox_sunset() { // pox is still "active" despite unlock, because there's enough participation, and also even // though the v1 block height has passed, the pox-2 contract won't be managing reward sets // until the next reward cycle - eprintln!("pox_info in pox-2 = {:?}", &pox_info); - assert_eq!(pox_info.current_cycle.is_pox_active, true); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox-2") - ); + eprintln!("pox_info in pox-2 = {pox_info:?}"); + assert!(pox_info.current_cycle.is_pox_active); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox-2"); // re-stack let tx = make_contract_call( @@ -1654,7 +1630,7 @@ fn transition_removes_pox_sunset() { &[ Value::UInt(first_bal as u128 - 260 * 3), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -1671,30 +1647,24 @@ fn transition_removes_pox_sunset() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!( - "Sort height pox-1 to pox-2 with stack-stx to pox-2: {}", - sort_height - ); + eprintln!("Sort height pox-1 to pox-2 with stack-stx to pox-2: {sort_height}"); let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!(pox_info.current_cycle.is_pox_active, true); + assert!(pox_info.current_cycle.is_pox_active); // get pox back online while sort_height <= epoch_21 + reward_cycle_len { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-2: {}", sort_height); + eprintln!("Sort height pox-2: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); - eprintln!("pox_info = {:?}", &pox_info); - assert_eq!(pox_info.current_cycle.is_pox_active, true); + eprintln!("pox_info = {pox_info:?}"); + assert!(pox_info.current_cycle.is_pox_active); // first full reward cycle with pox-2 - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox-2") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox-2"); let burn_blocks = test_observer::get_burn_blocks(); let mut pox_out_opt = None; @@ -1719,9 +1689,9 @@ fn transition_removes_pox_sunset() { if (i as u64) < (sunset_start_rc * reward_cycle_len) { // before sunset - if recipients.len() >= 1 { + if !recipients.is_empty() { for (_, amt) in recipients.into_iter() { - pox_out_opt = if let Some(pox_out) = pox_out_opt.clone() { + pox_out_opt = if let Some(pox_out) = pox_out_opt { Some(std::cmp::max(amt, pox_out)) } else { Some(amt) @@ -1730,16 +1700,16 @@ fn transition_removes_pox_sunset() { } } else if (i as u64) >= (sunset_start_rc * reward_cycle_len) && (i as u64) + 1 < epoch_21 { // some sunset burn happened - let pox_out = pox_out_opt.clone().unwrap(); - if recipients.len() >= 1 
{ + let pox_out = pox_out_opt.unwrap(); + if !recipients.is_empty() { for (_, amt) in recipients.into_iter() { assert!(amt < pox_out); } } } else if (i as u64) + 1 >= epoch_21 { // no sunset burn happened - let pox_out = pox_out_opt.clone().unwrap(); - if recipients.len() >= 1 { + let pox_out = pox_out_opt.unwrap(); + if !recipients.is_empty() { for (_, amt) in recipients.into_iter() { // NOTE: odd number of reward cycles if !burnchain_config.is_in_prepare_phase((i + 2) as u64) { @@ -1769,11 +1739,11 @@ fn transition_empty_blocks() { let (mut conf, miner_account) = neon_integration_test_conf(); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_2_05; - epochs[2].start_height = epoch_2_05; - epochs[2].end_height = epoch_2_1; - epochs[3].start_height = epoch_2_1; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].end_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].start_height = epoch_2_1; conf.node.mine_microblocks = false; conf.burnchain.max_rbf = 1000000; @@ -1862,8 +1832,8 @@ fn transition_empty_blocks() { let pox_info = get_pox_info(&http_origin).unwrap(); eprintln!( - "\nPoX info at {}\n{:?}\n\n", - tip_info.burn_block_height, &pox_info + "\nPoX info at {}\n{pox_info:?}\n\n", + tip_info.burn_block_height ); // this block is the epoch transition? @@ -1875,21 +1845,20 @@ fn transition_empty_blocks() { ) .unwrap(); let res = StacksChainState::block_crosses_epoch_boundary( - &chainstate.db(), + chainstate.db(), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip ), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, - tip_info.burn_block_height, - res + tip_info.burn_block_height ); if tip_info.burn_block_height == epoch_2_05 || tip_info.burn_block_height == epoch_2_1 { @@ -1987,8 +1956,8 @@ pub fn wait_pox_stragglers(confs: &[Config], max_stacks_tip: u64, block_time_ms: let mut stacks_tip_bhh = None; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.stacks_tip_height < max_stacks_tip { straggler = true; @@ -2050,22 +2019,16 @@ fn test_pox_reorgs_three_flaps() { conf_template.node.require_affirmed_anchor_blocks = false; // make epoch 2.1 start in the middle of boot-up - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = 101; - epochs[2].start_height = 101; - epochs[2].end_height = 151; - epochs[3].start_height = 151; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = 101; + epochs[StacksEpochId::Epoch2_05].start_height = 101; + epochs[StacksEpochId::Epoch2_05].end_height = 151; + epochs[StacksEpochId::Epoch21].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| 
StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -2112,7 +2075,7 @@ fn test_pox_reorgs_three_flaps() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -2124,9 +2087,9 @@ fn test_pox_reorgs_three_flaps() { let rpc_port = 41043 + 10 * i; let p2p_port = 41043 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); // conf.connection_options.inv_reward_cycles = 10; @@ -2134,16 +2097,14 @@ fn test_pox_reorgs_three_flaps() { } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -2151,8 +2112,8 @@ fn test_pox_reorgs_three_flaps() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -2187,10 +2148,10 @@ fn test_pox_reorgs_three_flaps() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -2215,8 +2176,8 @@ fn test_pox_reorgs_three_flaps() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! 
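The bootstrap-node hunk above is more than style. With the old `for i in 1..num_miners { confs[i]... }`, each indexed borrow was short-lived, so reading `confs[0]` inside the loop was fine; `confs.iter_mut().skip(1)` instead holds a mutable borrow of the whole `Vec` for the loop body, so the `chain_id`/`peer_version`/`p2p_bind` reads have to be hoisted above the loop, which also stops recomputing them per iteration. A condensed sketch (field names are illustrative, not the real config struct):

```rust
#[derive(Default)]
struct NodeConf {
    chain_id: u32,
    bootstrap: String,
}

fn main() {
    let mut confs: Vec<NodeConf> = (0..3).map(|_| NodeConf::default()).collect();
    confs[0].chain_id = 0x8000_0000;
    confs[0].bootstrap = "127.0.0.1:20444".into();

    // Hoisted: once iter_mut() below starts, `confs` is mutably borrowed
    // for the whole loop, so the immutable peek at confs[0] must happen
    // here. Cloning the String keeps that copy cheap and explicit.
    let chain_id = confs[0].chain_id;
    let p2p_bind = confs[0].bootstrap.clone();

    // skip(1) replaces `for i in 1..confs.len()`, dropping the index
    // bookkeeping and the per-access bounds checks.
    for conf in confs.iter_mut().skip(1) {
        conf.chain_id = chain_id;
        conf.bootstrap = format!("peer@{p2p_bind}");
    }

    assert!(confs.iter().all(|c| c.chain_id == 0x8000_0000));
}
```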
- for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -2224,7 +2185,7 @@ fn test_pox_reorgs_three_flaps() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -2238,23 +2199,19 @@ fn test_pox_reorgs_three_flaps() { ); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -2264,19 +2221,14 @@ fn test_pox_reorgs_three_flaps() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -2288,7 +2240,7 @@ fn test_pox_reorgs_three_flaps() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -2311,11 +2263,9 @@ fn test_pox_reorgs_three_flaps() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -2325,8 +2275,8 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -2337,15 +2287,15 @@ fn test_pox_reorgs_three_flaps() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -2353,8 +2303,8 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = 
get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); //assert_eq!(tip_info.affirmations.heaviest, AffirmationMap::decode("nnnnnnnnnnnnnnnnnnnnp").unwrap()); } @@ -2369,13 +2319,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2386,21 +2336,21 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. // miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2411,8 +2361,8 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -2423,13 +2373,13 @@ fn test_pox_reorgs_three_flaps() { // miner 1 mines a prepare phase and confirms a hidden anchor block. // miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2440,8 +2390,8 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history continues to overtake miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -2452,13 +2402,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. 
// miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2469,8 +2419,8 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 0 may have won here, but its affirmation map isn't yet the heaviest. } @@ -2479,13 +2429,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2496,8 +2446,8 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 0's affirmation map now becomes the heaviest. } @@ -2506,13 +2456,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2524,8 +2474,8 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 0's affirmation map is now the heaviest, and there's no longer a tie. 
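A cluster of smaller clippy-driven cleanups recurs through these monitoring loops and the sunset checks earlier in the patch: `get_chain_info(&c)` becomes `get_chain_info(c)` because `c` is already a reference (`needless_borrow`); `recipients.len() >= 1` becomes `!recipients.is_empty()` (`len_zero`); `assert_eq!(flag, true/false)` becomes `assert!(flag)` / `assert!(!flag)` (`bool_assert_comparison`); and comparisons against `&format!("literal")` drop the pointless allocation (`useless_format`). All four in one std-only sketch:

```rust
fn describe(s: &str) -> usize {
    s.len()
}

fn main() {
    let confs = vec![String::from("miner-0"), String::from("miner-1")];

    for c in confs.iter() {
        // `c` is already a &String that deref-coerces to &str; writing
        // `&c` adds a needless extra level of reference.
        let _ = describe(c);
    }

    let recipients: Vec<u64> = vec![500_000];
    // Asks the intended question directly instead of `len() >= 1`.
    assert!(!recipients.is_empty());

    let is_pox_active = true;
    // Same failure semantics as assert_eq!(is_pox_active, true).
    assert!(is_pox_active);

    let contract_id = String::from("ST000000000000000000002AMW42H.pox-2");
    // No format!() allocation needed to compare against a literal.
    assert_eq!(&contract_id, "ST000000000000000000002AMW42H.pox-2");
}
```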
max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); @@ -2538,24 +2488,21 @@ fn test_pox_reorgs_three_flaps() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate - eprintln!( - "Wait for all blocks to propagate; max tip is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; max tip is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on affirmation maps for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -2592,22 +2539,16 @@ fn test_pox_reorg_one_flap() { conf_template.node.require_affirmed_anchor_blocks = false; // make epoch 2.1 start in the middle of boot-up - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = 101; - epochs[2].start_height = 101; - epochs[2].end_height = 151; - epochs[3].start_height = 151; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = 101; + epochs[StacksEpochId::Epoch2_05].start_height = 101; + epochs[StacksEpochId::Epoch2_05].end_height = 151; + epochs[StacksEpochId::Epoch21].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -2654,7 +2595,7 @@ fn test_pox_reorg_one_flap() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -2666,24 +2607,22 @@ fn test_pox_reorg_one_flap() { let rpc_port = 41063 + 10 * i; let p2p_port = 41063 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + 
conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -2691,8 +2630,8 @@ fn test_pox_reorg_one_flap() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -2727,10 +2666,10 @@ fn test_pox_reorg_one_flap() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -2755,8 +2694,8 @@ fn test_pox_reorg_one_flap() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -2764,7 +2703,7 @@ fn test_pox_reorg_one_flap() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -2778,23 +2717,19 @@ fn test_pox_reorg_one_flap() { ); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner {}: {:?}\n\n", i, &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -2804,19 +2739,14 @@ fn test_pox_reorg_one_flap() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -2828,7 +2758,7 @@ fn test_pox_reorg_one_flap() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -2851,11 +2781,9 @@ fn test_pox_reorg_one_flap() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - 
eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -2865,8 +2793,8 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -2877,15 +2805,15 @@ fn test_pox_reorg_one_flap() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -2893,8 +2821,8 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -2907,13 +2835,13 @@ fn test_pox_reorg_one_flap() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2924,21 +2852,21 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. // miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2950,8 +2878,8 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. 
// Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -2966,24 +2894,21 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate - eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -3018,22 +2943,16 @@ fn test_pox_reorg_flap_duel() { conf_template.node.require_affirmed_anchor_blocks = false; // make epoch 2.1 start in the middle of boot-up - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = 101; - epochs[2].start_height = 101; - epochs[2].end_height = 151; - epochs[3].start_height = 151; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = 101; + epochs[StacksEpochId::Epoch2_05].start_height = 101; + epochs[StacksEpochId::Epoch2_05].end_height = 151; + epochs[StacksEpochId::Epoch21].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -3080,7 +2999,7 @@ fn test_pox_reorg_flap_duel() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -3094,24 +3013,23 @@ fn test_pox_reorg_flap_duel() { let rpc_port = 41083 + 10 * i; let p2p_port = 41083 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); - confs[i].node.set_bootstrap_nodes( + for conf in 
confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -3119,8 +3037,8 @@ fn test_pox_reorg_flap_duel() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -3155,10 +3073,10 @@ fn test_pox_reorg_flap_duel() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -3183,8 +3101,8 @@ fn test_pox_reorg_flap_duel() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -3192,7 +3110,7 @@ fn test_pox_reorg_flap_duel() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -3206,23 +3124,19 @@ fn test_pox_reorg_flap_duel() { ); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -3232,19 +3146,14 @@ fn test_pox_reorg_flap_duel() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -3256,7 +3165,7 @@ fn test_pox_reorg_flap_duel() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -3279,11 +3188,9 @@ fn test_pox_reorg_flap_duel() { .collect(); // everyone locks up - let mut cnt = 0; 
- for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -3293,8 +3200,8 @@ fn test_pox_reorg_flap_duel() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -3305,15 +3212,15 @@ fn test_pox_reorg_flap_duel() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -3321,8 +3228,8 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); //assert_eq!(tip_info.affirmations.heaviest, AffirmationMap::decode("nnnnnnnnnnnnnnnnnnnnp").unwrap()); } @@ -3344,13 +3251,13 @@ fn test_pox_reorg_flap_duel() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } @@ -3362,21 +3269,21 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. // miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -3387,8 +3294,8 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. 
// Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -3404,8 +3311,8 @@ fn test_pox_reorg_flap_duel() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation @@ -3415,16 +3322,13 @@ fn test_pox_reorg_flap_duel() { // NOTE: the stacks affirmation maps will differ from the heaviest affirmation map, because the // act of flapping back and forth so much will have caused these nodes to forget about some of // their anchor blocks. This is an artifact of the test. - eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -3458,22 +3362,16 @@ fn test_pox_reorg_flap_reward_cycles() { conf_template.node.require_affirmed_anchor_blocks = false; // make epoch 2.1 start in the middle of boot-up - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = 101; - epochs[2].start_height = 101; - epochs[2].end_height = 151; - epochs[3].start_height = 151; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = 101; + epochs[StacksEpochId::Epoch2_05].start_height = 101; + epochs[StacksEpochId::Epoch2_05].end_height = 151; + epochs[StacksEpochId::Epoch21].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -3520,7 +3418,7 @@ fn test_pox_reorg_flap_reward_cycles() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -3532,24 +3430,22 @@ fn test_pox_reorg_flap_reward_cycles() { let rpc_port = 41123 + 10 * i; let p2p_port = 41123 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = 
confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -3557,8 +3453,8 @@ fn test_pox_reorg_flap_reward_cycles() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in confs.iter() { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -3593,10 +3489,10 @@ fn test_pox_reorg_flap_reward_cycles() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -3621,8 +3517,8 @@ fn test_pox_reorg_flap_reward_cycles() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -3630,7 +3526,7 @@ fn test_pox_reorg_flap_reward_cycles() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -3644,23 +3540,19 @@ fn test_pox_reorg_flap_reward_cycles() { ); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -3670,19 +3562,14 @@ fn test_pox_reorg_flap_reward_cycles() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -3694,7 +3581,7 @@ fn test_pox_reorg_flap_reward_cycles() { &[ Value::UInt(2_000_000_000_000_000 - 
30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -3717,11 +3604,9 @@ fn test_pox_reorg_flap_reward_cycles() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -3731,8 +3616,8 @@ fn test_pox_reorg_flap_reward_cycles() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -3743,15 +3628,15 @@ fn test_pox_reorg_flap_reward_cycles() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -3759,8 +3644,8 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -3780,13 +3665,13 @@ fn test_pox_reorg_flap_reward_cycles() { // miner 1 is disabled for this reward cycle signal_mining_blocked(miner_status[1].clone()); for i in 0..20 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } } @@ -3794,8 +3679,8 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -3803,21 +3688,21 @@ fn test_pox_reorg_flap_reward_cycles() { // miner 0 is disabled for this reward cycle signal_mining_blocked(miner_status[0].clone()); for i in 0..20 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } } signal_mining_ready(miner_status[0].clone()); info!("####################### end of cycle 
##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -3833,8 +3718,8 @@ fn test_pox_reorg_flap_reward_cycles() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation @@ -3844,16 +3729,13 @@ fn test_pox_reorg_flap_reward_cycles() { // NOTE: the stacks affirmation maps will differ from the heaviest affirmation map, because the // act of flapping back and forth so much will have caused these nodes to forget about some of // their anchor blocks. This is an artifact of the test. - eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -3890,22 +3772,16 @@ fn test_pox_missing_five_anchor_blocks() { conf_template.node.require_affirmed_anchor_blocks = false; // make epoch 2.1 start in the middle of boot-up - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = 101; - epochs[2].start_height = 101; - epochs[2].end_height = 151; - epochs[3].start_height = 151; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = 101; + epochs[StacksEpochId::Epoch2_05].start_height = 101; + epochs[StacksEpochId::Epoch2_05].end_height = 151; + epochs[StacksEpochId::Epoch21].start_height = 151; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -3952,7 +3828,7 @@ fn test_pox_missing_five_anchor_blocks() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -3964,24 +3840,22 @@ fn test_pox_missing_five_anchor_blocks() { let rpc_port = 41103 + 10 * i; let p2p_port = 41103 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + 
conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -3989,8 +3863,8 @@ fn test_pox_missing_five_anchor_blocks() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -4025,10 +3899,10 @@ fn test_pox_missing_five_anchor_blocks() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -4053,8 +3927,8 @@ fn test_pox_missing_five_anchor_blocks() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! 
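These hunks repeatedly trade index-based loops for iterator adapters: `for i in 0..num_miners` over `blocks_processed[i as usize]` becomes `for bp in &blocks_processed` (as in the hunk just below), skip-the-first loops become `.iter().skip(1)` or `.iter_mut().skip(1)`, and hand-maintained counters become `.enumerate()` (as in the stacking-tx loops above). A standalone sketch of the three shapes, with `wait_for_runloop` and `submit_tx` replaced by hypothetical stand-ins:

```rust
// Hypothetical stand-ins for the helpers used in these tests.
fn wait_for_runloop(counter: &u64) {
    println!("runloop at {counter}");
}
fn submit_tx(tx: &str) {
    println!("submitted {tx}");
}

fn main() {
    let blocks_processed = vec![0u64, 1, 2];
    let stacking_txs = vec!["tx-a".to_string(), "tx-b".to_string()];

    // Index loop with a cast...
    //     for i in 0..num_miners { wait_for_runloop(&blocks_processed[i as usize]); }
    // ...becomes direct iteration:
    for bp in &blocks_processed {
        wait_for_runloop(bp);
    }

    // "All but the first" without index arithmetic:
    for bp in blocks_processed.iter().skip(1) {
        wait_for_runloop(bp);
    }

    // Hand-maintained counter...
    //     let mut cnt = 0; for tx in stacking_txs { submit_tx(&tx); cnt += 1; }
    // ...becomes enumerate(), which also lets the loop borrow instead of consume:
    for (cnt, tx) in stacking_txs.iter().enumerate() {
        println!("Submit stacking tx {cnt}");
        submit_tx(tx);
    }
}
```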
- for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -4062,7 +3936,7 @@ fn test_pox_missing_five_anchor_blocks() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -4076,23 +3950,19 @@ fn test_pox_missing_five_anchor_blocks() { ); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -4102,19 +3972,14 @@ fn test_pox_missing_five_anchor_blocks() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -4126,7 +3991,7 @@ fn test_pox_missing_five_anchor_blocks() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -4149,11 +4014,9 @@ fn test_pox_missing_five_anchor_blocks() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -4163,8 +4026,8 @@ fn test_pox_missing_five_anchor_blocks() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -4175,15 +4038,15 @@ fn test_pox_missing_five_anchor_blocks() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -4191,8 +4054,8 @@ fn test_pox_missing_five_anchor_blocks() { info!("####################### end of cycle ##############################"); for 
(i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -4207,13 +4070,13 @@ fn test_pox_missing_five_anchor_blocks() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {} cycle {}\n\n", i, c); + eprintln!("\n\nBuild block {i} cycle {c}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -4223,8 +4086,8 @@ fn test_pox_missing_five_anchor_blocks() { signal_mining_ready(miner_status[1].clone()); info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } info!("####################### end of cycle ##############################"); @@ -4236,8 +4099,8 @@ fn test_pox_missing_five_anchor_blocks() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation @@ -4245,16 +4108,13 @@ fn test_pox_missing_five_anchor_blocks() { // wait for all blocks to propagate. 
// miner 1 should learn about all of miner 0's blocks - info!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + info!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -4290,22 +4150,16 @@ fn test_sortition_divergence_pre_21() { conf_template.node.always_use_affirmation_maps = false; // make epoch 2.1 start after we have created this error condition - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = 101; - epochs[2].start_height = 101; - epochs[2].end_height = 241; - epochs[3].start_height = 241; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = 101; + epochs[StacksEpochId::Epoch2_05].start_height = 101; + epochs[StacksEpochId::Epoch2_05].end_height = 241; + epochs[StacksEpochId::Epoch21].start_height = 241; conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -4352,7 +4206,7 @@ fn test_sortition_divergence_pre_21() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -4368,24 +4222,22 @@ fn test_sortition_divergence_pre_21() { let rpc_port = 41113 + 10 * i; let p2p_port = 41113 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } let node_privkey_1 = Secp256k1PrivateKey::from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -4393,8 +4245,8 @@ fn test_sortition_divergence_pre_21() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config =
Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -4429,10 +4281,10 @@ fn test_sortition_divergence_pre_21() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -4457,8 +4309,8 @@ fn test_sortition_divergence_pre_21() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -4466,7 +4318,7 @@ fn test_sortition_divergence_pre_21() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -4480,23 +4332,19 @@ fn test_sortition_divergence_pre_21() { ); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -4506,19 +4354,14 @@ fn test_sortition_divergence_pre_21() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -4530,7 +4373,7 @@ fn test_sortition_divergence_pre_21() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -4553,11 +4396,9 @@ fn test_sortition_divergence_pre_21() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -4567,8 +4408,8 @@ fn test_sortition_divergence_pre_21() 
{ sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -4579,15 +4420,15 @@ fn test_sortition_divergence_pre_21() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -4595,8 +4436,8 @@ fn test_sortition_divergence_pre_21() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -4611,13 +4452,13 @@ fn test_sortition_divergence_pre_21() { // mine a reward cycle in which the 2.05 rules choose a PoX anchor block, but the 2.1 rules do // not. for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len && i < reward_cycle_len - prepare_phase_len + 3 @@ -4645,27 +4486,27 @@ fn test_sortition_divergence_pre_21() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } info!("####################### end of cycle ##############################"); for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } } info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } info!("####################### end of cycle ##############################"); @@ -4673,13 +4514,13 @@ fn test_sortition_divergence_pre_21() { // run some cycles in 2.1 for _ in 0..2 { for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip 
for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } } } @@ -4690,24 +4531,21 @@ fn test_sortition_divergence_pre_21() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate. // miner 1 should learn about all of miner 0's blocks - info!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + info!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -4722,7 +4560,7 @@ fn trait_invocation_cross_epoch() { let spender_sk = StacksPrivateKey::new(); let spender_addr = PrincipalData::from(to_addr(&spender_sk)); - let spender_addr_c32 = StacksAddress::from(to_addr(&spender_sk)); + let spender_addr_c32 = to_addr(&spender_sk); let trait_contract = "(define-trait simple-method ((foo (uint) (response uint uint)) ))"; let impl_contract = @@ -4749,11 +4587,11 @@ fn trait_invocation_cross_epoch() { }]; conf.initial_balances.append(&mut initial_balances); test_observer::register_any(&mut conf); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_2_05; - epochs[2].start_height = epoch_2_05; - epochs[2].end_height = epoch_2_1; - epochs[3].start_height = epoch_2_1; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].end_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].start_height = epoch_2_1; conf.burnchain.epochs = Some(epochs); let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -4897,8 +4735,7 @@ fn trait_invocation_cross_epoch() { "invoke-simple", "invocation-2", &[Value::Principal(PrincipalData::Contract( - QualifiedContractIdentifier::parse(&format!("{}.{}", &spender_addr_c32, "impl-simple")) - .unwrap(), + QualifiedContractIdentifier::parse(&format!("{spender_addr_c32}.impl-simple")).unwrap(), ))], ); let invoke_2_txid = submit_tx(&http_origin, &tx); @@ -4907,7 +4744,7 @@ fn trait_invocation_cross_epoch() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); } - let interesting_txids = vec![ + let interesting_txids = [ invoke_txid.clone(), invoke_1_txid.clone(), invoke_2_txid.clone(), @@ -4988,21 +4825,13 @@ fn test_v1_unlock_height_with_current_stackers() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = 
bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -5023,11 +4852,11 @@ fn test_v1_unlock_height_with_current_stackers() { test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_2_05; - epochs[2].start_height = epoch_2_05; - epochs[2].end_height = epoch_2_1; - epochs[3].start_height = epoch_2_1; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].end_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].start_height = epoch_2_1; conf.burnchain.epochs = Some(epochs); let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -5089,7 +4918,7 @@ fn test_v1_unlock_height_with_current_stackers() { // stack right away let sort_height = channel.get_sortitions_processed() + 1; let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -5110,7 +4939,7 @@ fn test_v1_unlock_height_with_current_stackers() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until epoch 2.1 @@ -5126,7 +4955,7 @@ fn test_v1_unlock_height_with_current_stackers() { let sort_height = channel.get_sortitions_processed() + 1; let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -5147,7 +4976,7 @@ fn test_v1_unlock_height_with_current_stackers() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -5181,7 +5010,7 @@ fn test_v1_unlock_height_with_current_stackers() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? 
pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -5200,12 +5029,10 @@ fn test_v1_unlock_height_with_current_stackers() { assert_eq!(addr_tuple, pox_addr_tuple_1); } } - } else { - if !burnchain_config.is_in_prepare_phase(height) { - assert_eq!(pox_addrs.len(), 2); - for addr_tuple in pox_addrs { - assert_eq!(addr_tuple, pox_addr_tuple_2); - } + } else if !burnchain_config.is_in_prepare_phase(height) { + assert_eq!(pox_addrs.len(), 2); + for addr_tuple in pox_addrs { + assert_eq!(addr_tuple, pox_addr_tuple_2); } } } @@ -5251,21 +5078,13 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -5286,11 +5105,11 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_2_05; - epochs[2].start_height = epoch_2_05; - epochs[2].end_height = epoch_2_1; - epochs[3].start_height = epoch_2_1; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].end_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].start_height = epoch_2_1; conf.burnchain.epochs = Some(epochs); let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -5355,7 +5174,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -5376,7 +5195,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -5404,7 +5223,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -5425,7 +5244,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -5459,7 +5278,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? 
pox-addrs u{})", height), + &format!("(get-burn-block-info? pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -5471,11 +5290,11 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); + debug!("Test burnchain height {height}"); if !burnchain_config.is_in_prepare_phase(height) { let mut have_expected_payout = false; if height < epoch_2_1 + (reward_cycle_len as u64) { - if pox_addrs.len() > 0 { + if !pox_addrs.is_empty() { assert_eq!(pox_addrs.len(), 2); for addr_tuple in pox_addrs { // can either pay to pox tuple 1, or burn @@ -5485,15 +5304,13 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { } } } - } else { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - for addr_tuple in pox_addrs { - // can either pay to pox tuple 2, or burn - assert_ne!(addr_tuple, pox_addr_tuple_1); - if addr_tuple == pox_addr_tuple_2 { - have_expected_payout = true; - } + } else if !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + for addr_tuple in pox_addrs { + // can either pay to pox tuple 2, or burn + assert_ne!(addr_tuple, pox_addr_tuple_1); + if addr_tuple == pox_addr_tuple_2 { + have_expected_payout = true; } } } diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 9bffca7c8a..3bf521d7cb 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -8,8 +8,7 @@ use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready}; use stacks::clarity_cli::vm_execute as execute; -use stacks::core; -use stacks::core::STACKS_EPOCH_MAX; +use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks::util_lib::boot::boot_code_id; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::PrivateKey; @@ -58,7 +57,7 @@ fn disable_pox() { let epoch_2_2 = 255; // two blocks before next prepare phase. 
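The `increase_by` hunk just below is value-neutral: Rust ignores underscores in integer literals, so `1_000_0000` and `10_000_000` denote the same number, and only the digit grouping changes so the magnitude reads correctly at a glance. A one-line check:

```rust
// Underscores in Rust integer literals are purely visual; the compiler
// ignores them, so regrouping changes readability, not the value.
fn main() {
    assert_eq!(1_000_0000, 10_000_000);
    let increase_by: u64 = 10_000_000; // now reads unambiguously as ten million
    assert_eq!(increase_by, 1_000_0000);
}
```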
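The `pox_pubkey_hash` hunks below collapse `bytes_to_hex(&hash.to_bytes().to_vec())` into `bytes_to_hex(&hash.to_bytes())`, the same cleanup applied throughout the PoX tests above. A small sketch of why the intermediate `Vec` was pure overhead, assuming, as the call sites suggest, a helper that accepts any byte slice:

```rust
// Hypothetical helper with the shape the call sites suggest for the real
// bytes_to_hex: it only needs a byte slice. A fixed-size array such as the
// 20-byte Hash160 output can be passed by reference directly, so the
// removed `.to_vec()` was an extra heap allocation with no effect.
fn bytes_to_hex(bytes: &[u8]) -> String {
    bytes.iter().map(|b| format!("{b:02x}")).collect()
}

fn main() {
    // Stand-in for Hash160::from_node_public_key(..).to_bytes().
    let hash_bytes: [u8; 20] = [0xab; 20];

    let hex_old = bytes_to_hex(&hash_bytes.to_vec()); // before: allocate, then borrow
    let hex_new = bytes_to_hex(&hash_bytes); // after: &[u8; 20] coerces to &[u8]

    assert_eq!(hex_old, hex_new);
    assert_eq!(hex_new.len(), 40);
}
```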
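The epoch-setup hunk further below replaces positional access (`epochs[1]`, `epochs.truncate(5)`) with epoch-ID access (`epochs[StacksEpochId::Epoch21]`, `epochs.truncate_after(StacksEpochId::Epoch22)`). A minimal sketch of how an enum-keyed epoch list can support this via `Index`/`IndexMut`; every type here is a simplified stand-in, not the actual stacks-core definition:

```rust
use std::ops::{Index, IndexMut};

// Simplified stand-ins for StacksEpochId / StacksEpoch / EpochList, sketching
// why ID-keyed access is more robust than `epochs[1]`: the lookup names the
// epoch instead of relying on a position that silently shifts if the list is
// reordered or truncated.
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, PartialEq, Eq)]
enum EpochId {
    Epoch20,
    Epoch2_05,
    Epoch21,
    Epoch22,
}

struct Epoch {
    id: EpochId,
    start_height: u64,
    end_height: u64,
}

struct EpochList(Vec<Epoch>);

impl EpochList {
    // Sketch of the truncate_after call in the diff: keep everything up to
    // and including the named epoch (assumes the list is stored in order).
    fn truncate_after(&mut self, id: EpochId) {
        if let Some(pos) = self.0.iter().position(|e| e.id == id) {
            self.0.truncate(pos + 1);
        }
    }
}

impl Index<EpochId> for EpochList {
    type Output = Epoch;
    fn index(&self, id: EpochId) -> &Epoch {
        self.0.iter().find(|e| e.id == id).expect("unknown epoch")
    }
}

impl IndexMut<EpochId> for EpochList {
    fn index_mut(&mut self, id: EpochId) -> &mut Epoch {
        self.0
            .iter_mut()
            .find(|e| e.id == id)
            .expect("unknown epoch")
    }
}

fn main() {
    let mut epochs = EpochList(vec![
        Epoch { id: EpochId::Epoch20, start_height: 0, end_height: 1_000 },
        Epoch { id: EpochId::Epoch2_05, start_height: 1_000, end_height: 2_000 },
        Epoch { id: EpochId::Epoch21, start_height: 2_000, end_height: 3_000 },
        Epoch { id: EpochId::Epoch22, start_height: 3_000, end_height: u64::MAX },
    ]);
    // Mirrors the diff: adjust boundaries by name, then drop later epochs.
    epochs[EpochId::Epoch21].end_height = 255;
    epochs[EpochId::Epoch22].start_height = 255;
    epochs.truncate_after(EpochId::Epoch22);
    assert_eq!(epochs[EpochId::Epoch21].end_height, 255);
    assert_eq!(epochs[EpochId::Epoch22].start_height, 255);
    assert_eq!(epochs.0.len(), 4);
}
```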
let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let increase_by = 1_000_0000 * (core::MICROSTACKS_PER_STACKS as u64); + let increase_by = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let spender_sk = StacksPrivateKey::new(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); @@ -92,31 +91,19 @@ fn disable_pox() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -137,15 +124,15 @@ fn disable_pox() { test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_2_05; - epochs[2].start_height = epoch_2_05; - epochs[2].end_height = epoch_2_1; - epochs[3].start_height = epoch_2_1; - epochs[3].end_height = epoch_2_2; - epochs[4].start_height = epoch_2_2; - epochs[4].end_height = STACKS_EPOCH_MAX; - epochs.truncate(5); + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].end_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].start_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].end_height = epoch_2_2; + epochs[StacksEpochId::Epoch22].start_height = epoch_2_2; + epochs[StacksEpochId::Epoch22].end_height = STACKS_EPOCH_MAX; + epochs.truncate_after(StacksEpochId::Epoch22); conf.burnchain.epochs = Some(epochs); let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -210,14 +197,14 @@ fn disable_pox() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -239,7 +226,7 @@ fn disable_pox() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -267,7 +254,7 @@ fn disable_pox() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, 
version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -288,7 +275,7 @@ fn disable_pox() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); let tx = make_contract_call( @@ -307,7 +294,7 @@ fn disable_pox() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -334,7 +321,7 @@ fn disable_pox() { &[Value::UInt(increase_by.into())], ); - info!("Submit 2.1 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.1 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); for _i in 0..15 { @@ -361,7 +348,7 @@ fn disable_pox() { &[Value::UInt(5000)], ); - info!("Submit 2.1 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.1 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // finish the cycle after the 2.2 transition, @@ -397,9 +384,9 @@ fn disable_pox() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -407,7 +394,7 @@ fn disable_pox() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -419,38 +406,36 @@ fn disable_pox() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + debug!("Test burnchain height {height}"); + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -518,14 +503,12 @@ fn disable_pox() { for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number 
of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + expected_slots[&reward_cycle][pox_addr], + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." ); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } @@ -544,8 +527,7 @@ fn disable_pox() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr - && parsed.auth.get_origin_nonce() == aborted_increase_nonce + if tx_sender == spender_addr && parsed.auth.get_origin_nonce() == aborted_increase_nonce { let contract_call = match &parsed.payload { TransactionPayload::ContractCall(cc) => cc, @@ -626,31 +608,19 @@ fn pox_2_unlock_all() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -676,15 +646,15 @@ fn pox_2_unlock_all() { }); conf.initial_balances.append(&mut initial_balances); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_2_05; - epochs[2].start_height = epoch_2_05; - epochs[2].end_height = epoch_2_1; - epochs[3].start_height = epoch_2_1; - epochs[3].end_height = epoch_2_2; - epochs[4].start_height = epoch_2_2; - epochs[4].end_height = STACKS_EPOCH_MAX; - epochs.truncate(5); + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].end_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].start_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].end_height = epoch_2_2; + epochs[StacksEpochId::Epoch22].start_height = epoch_2_2; + epochs[StacksEpochId::Epoch22].end_height = STACKS_EPOCH_MAX; + epochs.truncate_after(StacksEpochId::Epoch22); conf.burnchain.epochs = Some(epochs); let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -749,14 +719,14 @@ fn pox_2_unlock_all() { // stack right away let 
sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -778,7 +748,7 @@ fn pox_2_unlock_all() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -807,7 +777,7 @@ fn pox_2_unlock_all() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -839,7 +809,7 @@ fn pox_2_unlock_all() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -859,7 +829,7 @@ fn pox_2_unlock_all() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -892,7 +862,7 @@ fn pox_2_unlock_all() { // in bitcoin block epoch_2_2 - 1, so `nonce_of_2_1_unlock_ht_call` // will be included in that bitcoin block. // this will build the last block before 2.2 activates - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let tx = make_contract_call( &spender_sk, @@ -913,19 +883,19 @@ fn pox_2_unlock_all() { // in bitcoin block epoch_2_2, so `nonce_of_2_2_unlock_ht_call` // will be included in that bitcoin block. 
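The reward-cycle accounting loops in these tests repeatedly replace a `contains_key` check followed by `insert` with the standard `HashMap` entry API, as in the `reward_cycle_pox_addrs` hunks. A minimal, self-contained sketch of that idiom (the toy key and map types here are illustrative, not the tests' real ones):

use std::collections::HashMap;

fn main() {
    let mut reward_cycle_pox_addrs: HashMap<u64, HashMap<&'static str, u64>> = HashMap::new();
    let reward_cycle = 21u64;

    // Before: if !map.contains_key(&key) { map.insert(key, HashMap::new()); }
    // After: a single lookup that lazily inserts the default value and
    // returns a mutable borrow of it.
    let cycle_map = reward_cycle_pox_addrs
        .entry(reward_cycle)
        .or_insert_with(HashMap::new);

    // The per-address slot counters could collapse the same way, though the
    // diff keeps the explicit get_mut/insert form for those:
    *cycle_map.entry("pox_addr_0").or_insert(0) += 1;

    assert_eq!(reward_cycle_pox_addrs[&reward_cycle]["pox_addr_0"], 1);
}

`or_default()` would be an equivalent spelling of `or_insert_with(HashMap::new)` here.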
// this block activates 2.2 - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // this *burn block* is when the unlock occurs - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // and this will mine the first block whose parent is the unlock block - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let spender_1_account = get_account(&http_origin, &spender_addr); let spender_2_account = get_account(&http_origin, &spender_2_addr); - info!("spender_1_account = {:?}", spender_1_account); - info!("spender_2_account = {:?}", spender_1_account); + info!("spender_1_account = {spender_1_account:?}"); + info!("spender_2_account = {spender_2_account:?}"); assert_eq!( spender_1_account.balance as u64, @@ -943,7 +913,7 @@ fn pox_2_unlock_all() { assert_eq!( spender_2_account.balance as u64, - spender_2_initial_balance - stacked - (1 * tx_fee), + spender_2_initial_balance - stacked - tx_fee, "Spender 2 should still be locked" ); assert_eq!( @@ -957,13 +927,13 @@ fn pox_2_unlock_all() { // and this will mine the bitcoin block containing the first block whose parent has >= unlock burn block // (which is the criterion for the unlock) - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let spender_1_account = get_account(&http_origin, &spender_addr); let spender_2_account = get_account(&http_origin, &spender_2_addr); - info!("spender_1_account = {:?}", spender_1_account); - info!("spender_2_account = {:?}", spender_1_account); + info!("spender_1_account = {spender_1_account:?}"); + info!("spender_2_account = {spender_2_account:?}"); assert_eq!( spender_1_account.balance, @@ -978,7 +948,7 @@ fn pox_2_unlock_all() { assert_eq!( spender_2_account.balance, - spender_2_initial_balance as u128 - (1 * tx_fee as u128), + spender_2_initial_balance as u128 - tx_fee as u128, "Spender 2 should be unlocked" ); assert_eq!(spender_2_account.locked, 0, "Spender 2 should be unlocked"); @@ -997,20 +967,20 @@ fn pox_2_unlock_all() { 1_000_000, ); - info!("Submit stack transfer tx to {:?}", &http_origin); + info!("Submit stack transfer tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // this wakes up the node to mine the transaction - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // this block selects the previously mined block - next_block_and_wait(&mut &mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let spender_1_account = get_account(&http_origin, &spender_addr); let spender_2_account = get_account(&http_origin, &spender_2_addr); let spender_3_account = get_account(&http_origin, &spender_3_addr); - info!("spender_1_account = {:?}", spender_1_account); - info!("spender_2_account = {:?}", spender_1_account); + info!("spender_1_account = {spender_1_account:?}"); + info!("spender_2_account = {spender_2_account:?}"); assert_eq!( spender_3_account.balance, 1_000_000, @@ -1038,7 +1008,7 @@ fn pox_2_unlock_all() { assert_eq!( spender_2_account.balance, - spender_2_initial_balance as u128 - (1 * tx_fee as u128), + spender_2_initial_balance as u128 - tx_fee as u128, "Spender 2 should
be unlocked" ); assert_eq!(spender_2_account.locked, 0, "Spender 2 should be unlocked"); @@ -1080,9 +1050,9 @@ fn pox_2_unlock_all() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -1090,7 +1060,7 @@ fn pox_2_unlock_all() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -1102,38 +1072,36 @@ fn pox_2_unlock_all() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + debug!("Test burnchain height {height}"); + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -1183,18 +1151,16 @@ fn pox_2_unlock_all() { let cycle_counts = match reward_cycle_pox_addrs.get(&reward_cycle) { Some(x) => x, None => { - info!("No reward cycle entry = {}", reward_cycle); + info!("No reward cycle entry = {reward_cycle}"); continue; } }; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + expected_slots[&reward_cycle][pox_addr], + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." 
); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } @@ -1215,7 +1181,7 @@ fn pox_2_unlock_all() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr + if tx_sender == spender_addr && parsed.auth.get_origin_nonce() == nonce_of_2_2_unlock_ht_call { let contract_call = match &parsed.payload { @@ -1231,7 +1197,7 @@ fn pox_2_unlock_all() { assert_eq!(result.to_string(), format!("(ok u{})", epoch_2_2 + 1)); unlock_ht_22_tested = true; } - if &tx_sender == &spender_addr + if tx_sender == spender_addr && parsed.auth.get_origin_nonce() == nonce_of_2_1_unlock_ht_call { let contract_call = match &parsed.payload { @@ -1292,26 +1258,20 @@ fn test_pox_reorg_one_flap() { conf_template.node.require_affirmed_anchor_blocks = false; // make epoch 2.1 and 2.2 start in the middle of boot-up - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = 101; - epochs[2].start_height = 101; - epochs[2].end_height = 151; - epochs[3].start_height = 151; - epochs[3].end_height = epoch_2_2; - epochs[4].start_height = epoch_2_2; - epochs[4].end_height = STACKS_EPOCH_MAX; - epochs.truncate(5); + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = 101; + epochs[StacksEpochId::Epoch2_05].start_height = 101; + epochs[StacksEpochId::Epoch2_05].end_height = 151; + epochs[StacksEpochId::Epoch21].start_height = 151; + epochs[StacksEpochId::Epoch21].end_height = epoch_2_2; + epochs[StacksEpochId::Epoch22].start_height = epoch_2_2; + epochs[StacksEpochId::Epoch22].end_height = STACKS_EPOCH_MAX; + epochs.truncate_after(StacksEpochId::Epoch22); conf_template.burnchain.epochs = Some(epochs); - let privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); - let stack_privks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let stack_privks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() @@ -1358,7 +1318,7 @@ fn test_pox_reorg_one_flap() { conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; conf.burnchain.epochs = conf_template.burnchain.epochs.clone(); - conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation.clone(); + conf.burnchain.pox_2_activation = conf_template.burnchain.pox_2_activation; conf.node.require_affirmed_anchor_blocks = conf_template.node.require_affirmed_anchor_blocks; @@ -1370,25 +1330,23 @@ fn test_pox_reorg_one_flap() { let rpc_port = 41063 + 10 * i; let p2p_port = 41063 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } let node_privkey_1 = StacksNode::make_node_private_key_from_seed(&confs[0].node.local_peer_seed); - for i in 1..num_miners { - let chain_id = confs[0].burnchain.chain_id; - let peer_version = confs[0].burnchain.peer_version; - let p2p_bind = confs[0].node.p2p_bind.clone(); - - 
confs[i].node.set_bootstrap_nodes( + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + for conf in confs.iter_mut().skip(1) { + conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -1396,8 +1354,8 @@ fn test_pox_reorg_one_flap() { } // use short reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let pox_constants = PoxConstants::new( reward_cycle_len, prepare_phase_len, @@ -1432,10 +1390,10 @@ fn test_pox_reorg_one_flap() { btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -1460,8 +1418,8 @@ fn test_pox_reorg_one_flap() { let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -1469,7 +1427,7 @@ fn test_pox_reorg_one_flap() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -1483,23 +1441,19 @@ fn test_pox_reorg_one_flap() { ); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner {}: {:?}\n\n", i, &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -1509,19 +1463,14 @@ fn test_pox_reorg_one_flap() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let sort_height = channels[0].get_sortitions_processed(); // make everyone stack let stacking_txs: Vec<_> = stack_privks .iter() - .enumerate() - .map(|(_i, pk)| { + .map(|pk| { make_contract_call( pk, 0, @@ -1533,7 +1482,7 @@ fn test_pox_reorg_one_flap() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ 
hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -1556,11 +1505,9 @@ fn test_pox_reorg_one_flap() { .collect(); // everyone locks up - let mut cnt = 0; - for tx in stacking_txs { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); - submit_tx(&http_origin, &tx); - cnt += 1; + for (cnt, tx) in stacking_txs.iter().enumerate() { + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); + submit_tx(&http_origin, tx); } // run a reward cycle @@ -1570,8 +1517,8 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -1582,15 +1529,15 @@ fn test_pox_reorg_one_flap() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } } for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -1598,8 +1545,8 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -1612,13 +1559,13 @@ fn test_pox_reorg_one_flap() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -1629,21 +1576,21 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. 
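The miner-boot and stacking loops in this test trade index arithmetic (`for i in 1..num_miners`) for iterator adapters such as `iter_mut().skip(1)` and `enumerate()`. A small self-contained sketch of the same idiom on toy data (the `confs` strings here stand in for the real node configs):

fn main() {
    let mut confs: Vec<String> = (0..3).map(|i| format!("miner-{i}")).collect();

    // Before: for i in 1..num_miners { configure(&mut confs[i]); }
    // After: skip(1) keeps the "every node except the bootstrap node" shape
    // without index arithmetic.
    for conf in confs.iter_mut().skip(1) {
        conf.push_str(" (bootstrapped)");
    }

    // enumerate() keeps the index available where it is still used in logs.
    for (i, conf) in confs.iter().enumerate().skip(1) {
        println!("Boot miner {i}: {conf}");
    }
}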
// miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -1655,8 +1602,8 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -1671,23 +1618,20 @@ fn test_pox_reorg_one_flap() { sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate - eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { - let tip_info = get_chain_info(&c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + let tip_info = get_chain_info(c); + info!("Final tip for miner {i}: {tip_info:?}"); } } diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 2355f7521d..92b6a97b8f 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -18,8 +18,7 @@ use std::{env, thread}; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use stacks::burnchains::{Burnchain, PoxConstants}; -use stacks::core; -use stacks::core::STACKS_EPOCH_MAX; +use stacks::core::{self, EpochList, STACKS_EPOCH_MAX}; use stacks_common::util::sleep_ms; use crate::config::InitialBalance; @@ -52,7 +51,7 @@ fn trait_invocation_behavior() { let spender_addr: PrincipalData = to_addr(&spender_sk).into(); let impl_contract_id = - QualifiedContractIdentifier::new(contract_addr.clone().into(), "impl-simple".into()); + QualifiedContractIdentifier::new(contract_addr.into(), "impl-simple".into()); let mut spender_nonce = 0; let fee_amount = 10_000; @@ -103,17 +102,17 @@ fn trait_invocation_behavior() { test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_2_05; - epochs[2].start_height = epoch_2_05; - epochs[2].end_height = epoch_2_1; - epochs[3].start_height = epoch_2_1; - epochs[3].end_height = epoch_2_2; - epochs[4].start_height = epoch_2_2; - epochs[4].end_height = epoch_2_3; - epochs[5].start_height = epoch_2_3; - epochs[5].end_height = STACKS_EPOCH_MAX; - epochs.truncate(6); + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; + 
epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].end_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].start_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].end_height = epoch_2_2; + epochs[StacksEpochId::Epoch22].start_height = epoch_2_2; + epochs[StacksEpochId::Epoch22].end_height = epoch_2_3; + epochs[StacksEpochId::Epoch23].start_height = epoch_2_3; + epochs[StacksEpochId::Epoch23].end_height = STACKS_EPOCH_MAX; + epochs.truncate_after(StacksEpochId::Epoch23); conf.burnchain.epochs = Some(epochs); let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -227,9 +226,8 @@ fn trait_invocation_behavior() { submit_tx(&http_origin, &publish_invoke); info!( - "At height = {}, epoch-2.1 = {}", - get_chain_info(&conf).burn_block_height, - epoch_2_1 + "At height = {}, epoch-2.1 = {epoch_2_1}", + get_chain_info(&conf).burn_block_height ); // wait until just before epoch 2.1 loop { @@ -509,7 +507,7 @@ fn trait_invocation_behavior() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - info!("Total spender txs = {}", spender_nonce); + info!("Total spender txs = {spender_nonce}"); let blocks = test_observer::get_blocks(); @@ -526,7 +524,7 @@ fn trait_invocation_behavior() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr { + if tx_sender == spender_addr { let contract_call = match &parsed.payload { TransactionPayload::ContractCall(cc) => cc, // only interested in contract calls @@ -583,29 +581,27 @@ fn trait_invocation_behavior() { assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); } - for tx_nonce in [expected_good_23_3_nonce] { - assert_eq!( - transaction_receipts[&tx_nonce].0.contract_name.as_str(), - "wrap-simple" - ); - assert_eq!( - transaction_receipts[&tx_nonce].0.function_name.as_str(), - "invocation-1" - ); - assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); - } + let tx_nonce = expected_good_23_3_nonce; + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-1" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); - for tx_nonce in [expected_good_23_4_nonce] { - assert_eq!( - transaction_receipts[&tx_nonce].0.contract_name.as_str(), - "wrap-simple" - ); - assert_eq!( - transaction_receipts[&tx_nonce].0.function_name.as_str(), - "invocation-2" - ); - assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); - } + let tx_nonce = expected_good_23_4_nonce; + assert_eq!( + transaction_receipts[&tx_nonce].0.contract_name.as_str(), + "wrap-simple" + ); + assert_eq!( + transaction_receipts[&tx_nonce].0.function_name.as_str(), + "invocation-2" + ); + assert_eq!(&transaction_receipts[&tx_nonce].1.to_string(), "(ok u0)"); for tx_nonce in [expected_bad_22_1_nonce, expected_bad_22_3_nonce] { assert_eq!( @@ -632,7 +628,7 @@ fn trait_invocation_behavior() { } for (key, value) in transaction_receipts.iter() { - eprintln!("{} => {} of {}", key, value.0, value.1); + eprintln!("{key} => {} of {}", value.0, value.1); } test_observer::clear(); diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 26ad007ca7..5e4ff9852a 100644 --- 
a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -26,7 +26,7 @@ use stacks::chainstate::stacks::boot::RawRewardSetEntry; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::{Error, StacksTransaction, TransactionPayload}; use stacks::clarity_cli::vm_execute as execute; -use stacks::core; +use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_TESTNET_SINGLESIG}; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey}; @@ -55,9 +55,9 @@ pub fn get_reward_set_entries_at_block( ) -> Result, Error> { state .get_reward_addresses(burnchain, sortdb, burn_block_height, block_id) - .and_then(|mut addrs| { + .map(|mut addrs| { addrs.sort_by_key(|k| k.reward_address.bytes()); - Ok(addrs) + addrs }) } @@ -86,7 +86,7 @@ fn fix_to_pox_contract() { let pox_3_activation_height = epoch_2_4; let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let increase_by = 1_000_0000 * (core::MICROSTACKS_PER_STACKS as u64); + let increase_by = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64); let spender_sk = StacksPrivateKey::new(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); @@ -110,31 +110,19 @@ fn fix_to_pox_contract() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_3 = Secp256k1PublicKey::from_hex( "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let (mut conf, _) = neon_integration_test_conf(); @@ -155,19 +143,19 @@ fn fix_to_pox_contract() { test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_2_05; - epochs[2].start_height = epoch_2_05; - epochs[2].end_height = epoch_2_1; - epochs[3].start_height = epoch_2_1; - epochs[3].end_height = epoch_2_2; - epochs[4].start_height = epoch_2_2; - epochs[4].end_height = epoch_2_3; - epochs[5].start_height = epoch_2_3; - epochs[5].end_height = epoch_2_4; - epochs[6].start_height = epoch_2_4; - epochs[6].end_height = STACKS_EPOCH_MAX; - epochs.truncate(7); + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].end_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].start_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].end_height = epoch_2_2; + epochs[StacksEpochId::Epoch22].start_height = epoch_2_2; + epochs[StacksEpochId::Epoch22].end_height = epoch_2_3; + 
epochs[StacksEpochId::Epoch23].start_height = epoch_2_3; + epochs[StacksEpochId::Epoch23].end_height = epoch_2_4; + epochs[StacksEpochId::Epoch24].start_height = epoch_2_4; + epochs[StacksEpochId::Epoch24].end_height = STACKS_EPOCH_MAX; + epochs.truncate_after(StacksEpochId::Epoch24); conf.burnchain.epochs = Some(epochs); let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -232,14 +220,14 @@ fn fix_to_pox_contract() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -261,7 +249,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -290,7 +278,7 @@ fn fix_to_pox_contract() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -311,7 +299,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -341,13 +329,13 @@ fn fix_to_pox_contract() { &[Value::UInt(5000)], ); - info!("Submit 2.2 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.2 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // transition to epoch 2.3 loop { let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height >= epoch_2_3 + 1 { + if tip_info.burn_block_height > epoch_2_3 { break; } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -367,7 +355,7 @@ fn fix_to_pox_contract() { &[Value::UInt(5000)], ); - info!("Submit 2.3 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.3 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // transition to 2 blocks before epoch 2.4 @@ -411,7 +399,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit 2.4 stacking tx to {:?}", &http_origin); + info!("Submit 2.4 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -431,7 +419,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit second 2.4 stacking tx to {:?}", &http_origin); + info!("Submit second 2.4 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -458,7 +446,7 @@ fn fix_to_pox_contract() { &[Value::UInt(increase_by.into())], ); - info!("Submit 2.4 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.4 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); for _i in 0..19 { @@ -492,9 +480,9 @@ fn fix_to_pox_contract() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = 
sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -502,7 +490,7 @@ fn fix_to_pox_contract() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -514,38 +502,36 @@ fn fix_to_pox_contract() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + debug!("Test burnchain height {height}"); + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -624,14 +610,12 @@ fn fix_to_pox_contract() { for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + expected_slots[&reward_cycle][pox_addr], + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." 
); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } @@ -651,7 +635,7 @@ fn fix_to_pox_contract() { let parsed = StacksTransaction::consensus_deserialize(&mut tx_bytes.as_slice()).unwrap(); let tx_sender = PrincipalData::from(parsed.auth.origin().address_testnet()); - if &tx_sender == &spender_addr + if tx_sender == spender_addr && (parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_2 || parsed.auth.get_origin_nonce() == aborted_increase_nonce_2_3) { @@ -738,21 +722,13 @@ fn verify_auto_unlock_behavior() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash_1 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_1) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_1 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_1).to_bytes()); let pox_pubkey_2 = Secp256k1PublicKey::from_hex( "03cd91307e16c10428dd0120d0a4d37f14d4e0097b3b2ea1651d7bd0fb109cd44b", ) .unwrap(); - let pox_pubkey_hash_2 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_2) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_2 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_2).to_bytes()); let pox_pubkey_2_stx_addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -765,11 +741,7 @@ fn verify_auto_unlock_behavior() { "0317782e663c77fb02ebf46a3720f41a70f5678ad185974a456d35848e275fe56b", ) .unwrap(); - let pox_pubkey_hash_3 = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey_3) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash_3 = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey_3).to_bytes()); let pox_pubkey_3_stx_addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -797,19 +769,19 @@ fn verify_auto_unlock_behavior() { test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_2_05; - epochs[2].start_height = epoch_2_05; - epochs[2].end_height = epoch_2_1; - epochs[3].start_height = epoch_2_1; - epochs[3].end_height = epoch_2_2; - epochs[4].start_height = epoch_2_2; - epochs[4].end_height = epoch_2_3; - epochs[5].start_height = epoch_2_3; - epochs[5].end_height = epoch_2_4; - epochs[6].start_height = epoch_2_4; - epochs[6].end_height = STACKS_EPOCH_MAX; - epochs.truncate(7); + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].end_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].start_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].end_height = epoch_2_2; + epochs[StacksEpochId::Epoch22].start_height = epoch_2_2; + epochs[StacksEpochId::Epoch22].end_height = epoch_2_3; + epochs[StacksEpochId::Epoch23].start_height = epoch_2_3; + epochs[StacksEpochId::Epoch23].end_height = epoch_2_4; + epochs[StacksEpochId::Epoch24].start_height = epoch_2_4; + epochs[StacksEpochId::Epoch24].end_height = STACKS_EPOCH_MAX; + epochs.truncate_after(StacksEpochId::Epoch24); conf.burnchain.epochs = Some(epochs); let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -879,14 +851,14 @@ fn verify_auto_unlock_behavior() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ 
hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -908,7 +880,7 @@ fn verify_auto_unlock_behavior() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -937,7 +909,7 @@ fn verify_auto_unlock_behavior() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -958,7 +930,7 @@ fn verify_auto_unlock_behavior() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -979,7 +951,7 @@ fn verify_auto_unlock_behavior() { // transition to epoch 2.3 loop { let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height >= epoch_2_3 + 1 { + if tip_info.burn_block_height > epoch_2_3 { break; } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -1044,7 +1016,7 @@ fn verify_auto_unlock_behavior() { ], ); - info!("Submit 2.4 stacking tx to {:?}", &http_origin); + info!("Submit 2.4 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -1064,7 +1036,7 @@ fn verify_auto_unlock_behavior() { ], ); - info!("Submit second 2.4 stacking tx to {:?}", &http_origin); + info!("Submit second 2.4 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -1113,7 +1085,7 @@ fn verify_auto_unlock_behavior() { .unwrap(); assert_eq!(reward_set_entries.len(), 2); - info!("reward set entries: {:?}", reward_set_entries); + info!("reward set entries: {reward_set_entries:?}"); assert_eq!( reward_set_entries[0].reward_address.bytes(), pox_pubkey_2_stx_addr.bytes.0.to_vec() @@ -1141,7 +1113,7 @@ fn verify_auto_unlock_behavior() { &[Value::UInt(first_stacked_incr.into())], ); - info!("Submit 2.4 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.4 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); for _i in 0..19 { @@ -1213,9 +1185,9 @@ fn verify_auto_unlock_behavior() { .block_height_to_reward_cycle(burnchain_config.first_block_height, height) .unwrap(); - if !reward_cycle_pox_addrs.contains_key(&reward_cycle) { - reward_cycle_pox_addrs.insert(reward_cycle, HashMap::new()); - } + reward_cycle_pox_addrs + .entry(reward_cycle) + .or_insert_with(HashMap::new); let iconn = sortdb.index_handle_at_block(&chainstate, &tip).unwrap(); let pox_addrs = chainstate @@ -1223,7 +1195,7 @@ fn verify_auto_unlock_behavior() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? 
pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -1235,37 +1207,35 @@ fn verify_auto_unlock_behavior() { .expect_list() .unwrap(); - if !burnchain_config.is_in_prepare_phase(height) { - if pox_addrs.len() > 0 { - assert_eq!(pox_addrs.len(), 2); - let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); - let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); + if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { + assert_eq!(pox_addrs.len(), 2); + let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); + let pox_addr_1 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[1]).unwrap(); - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_0) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_0) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_0, 1); - } + .insert(pox_addr_0, 1); + } - if let Some(pox_slot_count) = reward_cycle_pox_addrs + if let Some(pox_slot_count) = reward_cycle_pox_addrs + .get_mut(&reward_cycle) + .unwrap() + .get_mut(&pox_addr_1) + { + *pox_slot_count += 1; + } else { + reward_cycle_pox_addrs .get_mut(&reward_cycle) .unwrap() - .get_mut(&pox_addr_1) - { - *pox_slot_count += 1; - } else { - reward_cycle_pox_addrs - .get_mut(&reward_cycle) - .unwrap() - .insert(pox_addr_1, 1); - } + .insert(pox_addr_1, 1); } } } @@ -1340,14 +1310,12 @@ fn verify_auto_unlock_behavior() { for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, - expected_slots[&reward_cycle][&pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + expected_slots[&reward_cycle][pox_addr], + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." 
); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs index 6af1bee626..bedf8721cb 100644 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ b/testnet/stacks-node/src/tests/epoch_25.rs @@ -17,7 +17,7 @@ use std::{env, thread}; use clarity::vm::types::PrincipalData; use stacks::burnchains::{Burnchain, PoxConstants}; -use stacks::core; +use stacks::core::{self, EpochList, StacksEpochId}; use stacks_common::consts::STACKS_EPOCH_MAX; use stacks_common::types::chainstate::StacksPrivateKey; @@ -79,28 +79,28 @@ fn microblocks_disabled() { conf.node.wait_time_for_blocks = 2_000; conf.miner.wait_for_block_download = false; - conf.miner.first_attempt_time_ms = i64::max_value() as u64; - conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + conf.miner.first_attempt_time_ms = i64::MAX as u64; + conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; test_observer::spawn(); test_observer::register_any(&mut conf); conf.initial_balances.append(&mut initial_balances); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].end_height = epoch_2_05; - epochs[2].start_height = epoch_2_05; - epochs[2].end_height = epoch_2_1; - epochs[3].start_height = epoch_2_1; - epochs[3].end_height = epoch_2_2; - epochs[4].start_height = epoch_2_2; - epochs[4].end_height = epoch_2_3; - epochs[5].start_height = epoch_2_3; - epochs[5].end_height = epoch_2_4; - epochs[6].start_height = epoch_2_4; - epochs[6].end_height = epoch_2_5; - epochs[7].start_height = epoch_2_5; - epochs[7].end_height = STACKS_EPOCH_MAX; - epochs.truncate(8); + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].end_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].start_height = epoch_2_05; + epochs[StacksEpochId::Epoch2_05].end_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].start_height = epoch_2_1; + epochs[StacksEpochId::Epoch21].end_height = epoch_2_2; + epochs[StacksEpochId::Epoch22].start_height = epoch_2_2; + epochs[StacksEpochId::Epoch22].end_height = epoch_2_3; + epochs[StacksEpochId::Epoch23].start_height = epoch_2_3; + epochs[StacksEpochId::Epoch23].end_height = epoch_2_4; + epochs[StacksEpochId::Epoch24].start_height = epoch_2_4; + epochs[StacksEpochId::Epoch24].end_height = epoch_2_5; + epochs[StacksEpochId::Epoch25].start_height = epoch_2_5; + epochs[StacksEpochId::Epoch25].end_height = STACKS_EPOCH_MAX; + epochs.truncate_after(StacksEpochId::Epoch25); conf.burnchain.epochs = Some(epochs); let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); @@ -111,8 +111,8 @@ fn microblocks_disabled() { 4 * prepare_phase_len / 5, 5, 15, - u64::max_value() - 2, - u64::max_value() - 1, + u64::MAX - 2, + u64::MAX - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, @@ -162,6 +162,9 @@ fn microblocks_disabled() { // push us to block 205 next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // Ensure we start off with 0 microblocks + assert!(test_observer::get_microblocks().is_empty()); + let tx = make_stacks_transfer_mblock_only( &spender_1_sk, 0, @@ -172,7 +175,11 @@ fn microblocks_disabled() { ); submit_tx(&http_origin, &tx); - // wait until just before epoch 2.5 + // Wait for a microblock to be assembled + wait_for(60, || Ok(test_observer::get_microblocks().len() == 1)) + .expect("Failed to wait for microblocks to be assembled"); + + // mine Bitcoin blocks up until just before 
epoch 2.5 wait_for(120, || { let tip_info = get_chain_info(&conf); if tip_info.burn_block_height >= epoch_2_5 - 2 { @@ -183,6 +190,14 @@ fn microblocks_disabled() { }) .expect("Failed to wait until just before epoch 2.5"); + // Verify that the microblock was processed + let account = get_account(&http_origin, &spender_1_addr); + assert_eq!( + u64::try_from(account.balance).unwrap(), + spender_1_bal - 1_000 + ); + assert_eq!(account.nonce, 1); + let old_tip_info = get_chain_info(&conf); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -194,13 +209,8 @@ fn microblocks_disabled() { .expect("Failed to process block"); info!("Test passed processing 2.5"); - let account = get_account(&http_origin, &spender_1_addr); - assert_eq!( - u64::try_from(account.balance).unwrap(), - spender_1_bal - 1_000 - ); - assert_eq!(account.nonce, 1); + // Submit another microblock-only transaction let tx = make_stacks_transfer_mblock_only( &spender_1_sk, 1, @@ -211,19 +221,12 @@ fn microblocks_disabled() { ); submit_tx(&http_origin, &tx); - let mut last_block_height = get_chain_info(&conf).burn_block_height; - for _i in 0..5 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - wait_for(30, || { - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - return Ok(true); - } - Ok(false) - }) - .expect("Failed to mine"); - } + // Wait briefly for another microblock to be assembled; none should be + wait_for(30, || Ok(test_observer::get_microblocks().len() > 1)) + .expect_err("Microblocks should not have been assembled"); + + // Mine a block to see if the microblock gets processed + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // second transaction should not have been processed! let account = get_account(&http_origin, &spender_1_addr); assert_eq!( @@ -233,31 +236,18 @@ fn microblocks_disabled() { ); assert_eq!(account.nonce, 1); - let microblocks_assembled = test_observer::get_microblocks().len(); - info!("Microblocks assembled: {microblocks_assembled}",); - assert!( - microblocks_assembled > 0, - "There should be at least 1 microblock assembled" - ); - let miner_nonce_before_microblock_assembly = get_account(&http_origin, &miner_account).nonce;
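These hunks replace the mine-and-poll loops with direct `wait_for` calls against the observer state. A toy stand-in for that helper, with its signature inferred from the call sites in this diff rather than copied from the repo (the real helper may differ):

use std::time::{Duration, Instant};

// Poll `check` until it returns Ok(true) or `timeout_secs` elapses.
fn wait_for(timeout_secs: u64, mut check: impl FnMut() -> Result<bool, String>) -> Result<(), String> {
    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
    loop {
        if check()? {
            return Ok(());
        }
        if Instant::now() >= deadline {
            return Err("timed out waiting for condition".into());
        }
        std::thread::sleep(Duration::from_millis(100));
    }
}

fn main() {
    // `.expect(..)` asserts the condition eventually holds, while
    // `.expect_err(..)` (as in the "no new microblock" check above) asserts
    // the timeout elapses without the condition ever holding.
    let start = Instant::now();
    wait_for(5, || Ok(start.elapsed() >= Duration::from_secs(1))).expect("condition never held");
    wait_for(1, || Ok(false)).expect_err("condition should never hold");
}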
+ info!("Setting STACKS_TEST_FORCE_MICROBLOCKS_POST_25"); env::set_var("STACKS_TEST_FORCE_MICROBLOCKS_POST_25", "1"); - let mut last_block_height = get_chain_info(&conf).burn_block_height; - for _i in 0..2 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - wait_for(30, || { - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - return Ok(true); - } - Ok(false) - }) - .expect("Failed to mine"); - } + // Wait for a second microblock to be assembled + wait_for(60, || Ok(test_observer::get_microblocks().len() == 2)) + .expect("Failed to wait for microblocks to be assembled"); + + // Mine a block to see if the microblock gets processed + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let miner_nonce_after_microblock_assembly = get_account(&http_origin, &miner_account).nonce; @@ -270,44 +260,35 @@ fn microblocks_disabled() { ); assert_eq!(account.nonce, 1); - // but we should have assembled and announced at least 1 more block to the observer - assert!(test_observer::get_microblocks().len() > microblocks_assembled); info!( "Microblocks assembled: {}", test_observer::get_microblocks().len() ); // and our miner should have gotten some blocks accepted - assert!( - miner_nonce_after_microblock_assembly > miner_nonce_before_microblock_assembly, + assert_eq!( + miner_nonce_after_microblock_assembly, miner_nonce_before_microblock_assembly + 1, "Mined before started microblock assembly: {miner_nonce_before_microblock_assembly}, Mined after started microblock assembly: {miner_nonce_after_microblock_assembly}" ); // Now, tell the miner to try to confirm microblocks as well. // This should test that the block gets rejected by append block + info!("Setting STACKS_TEST_CONFIRM_MICROBLOCKS_POST_25"); env::set_var("STACKS_TEST_CONFIRM_MICROBLOCKS_POST_25", "1"); - let mut last_block_height = get_chain_info(&conf).burn_block_height; - for _i in 0..2 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - wait_for(30, || { - let tip_info = get_chain_info(&conf); - if tip_info.burn_block_height > last_block_height { - last_block_height = tip_info.burn_block_height; - return Ok(true); - } - Ok(false) - }) - .expect("Failed to mine"); - } + // Wait for a third microblock to be assembled + wait_for(60, || Ok(test_observer::get_microblocks().len() == 3)) + .expect("Failed to wait for microblocks to be assembled"); + + // Mine a block to see if the microblock gets processed + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let miner_nonce_after_microblock_confirmation = get_account(&http_origin, &miner_account).nonce; - // and our miner should have gotten at most one more block accepted - // (because they may have had 1 block confirmation in the bitcoin mempool which didn't confirm a microblock - // before we flipped the flag) - assert!( - miner_nonce_after_microblock_confirmation <= miner_nonce_after_microblock_assembly + 1, + // our miner should not have gotten any more blocks accepted + assert_eq!( + miner_nonce_after_microblock_confirmation, + miner_nonce_after_microblock_assembly + 1, "Mined after started microblock confimration: {miner_nonce_after_microblock_confirmation}", ); diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 236d76b000..79c3394352 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -1,3 +1,4 @@ +use 
std::cmp::Ordering; use std::collections::HashMap; use std::fmt::Write; use std::sync::Mutex; @@ -25,8 +26,8 @@ use stacks::clarity_vm::clarity::ClarityConnection; use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ - StacksEpoch, StacksEpochId, CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, - PEER_VERSION_EPOCH_2_1, + EpochList, StacksEpoch, StacksEpochId, CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_2_0, + PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, }; use stacks::net::api::callreadonly::CallReadOnlyRequestBody; use stacks::net::api::getaccount::AccountEntryResponse; @@ -43,7 +44,7 @@ use crate::config::InitialBalance; use crate::helium::RunLoop; use crate::tests::make_sponsored_stacks_transfer_on_testnet; -const OTHER_CONTRACT: &'static str = " +const OTHER_CONTRACT: &str = " (define-data-var x uint u0) (define-public (f1) (ok (var-get x))) @@ -51,14 +52,14 @@ const OTHER_CONTRACT: &'static str = " (ok (var-set x val))) "; -const CALL_READ_CONTRACT: &'static str = " +const CALL_READ_CONTRACT: &str = " (define-public (public-no-write) (ok (contract-call? .other f1))) (define-public (public-write) (ok (contract-call? .other f2 u5))) "; -const GET_INFO_CONTRACT: &'static str = " +const GET_INFO_CONTRACT: &str = " (define-map block-data { height: uint } { stacks-hash: (buff 32), @@ -143,7 +144,7 @@ const GET_INFO_CONTRACT: &'static str = " (fn-2 (uint) (response uint uint)))) "; -const IMPL_TRAIT_CONTRACT: &'static str = " +const IMPL_TRAIT_CONTRACT: &str = " ;; explicit trait compliance for trait 1 (impl-trait .get-info.trait-1) (define-private (test-height) burn-block-height) @@ -193,7 +194,7 @@ fn integration_test_get_info() { { let mut http_opt = HTTP_BINDING.lock().unwrap(); - http_opt.replace(format!("http://{}", &rpc_bind)); + http_opt.replace(format!("http://{rpc_bind}")); } run_loop @@ -279,10 +280,10 @@ fn integration_test_get_info() { let old_tip = StacksBlockId::new(&consensus_hash, &header_hash); use std::fs; use std::io::Write; - if fs::metadata(&tmppath).is_ok() { - fs::remove_file(&tmppath).unwrap(); + if fs::metadata(tmppath).is_ok() { + fs::remove_file(tmppath).unwrap(); } - let mut f = fs::File::create(&tmppath).unwrap(); + let mut f = fs::File::create(tmppath).unwrap(); f.write_all(&old_tip.serialize_to_vec()).unwrap(); } else if round == 2 { // block-height = 3 @@ -311,7 +312,7 @@ fn integration_test_get_info() { // block-height > 3 let tx = make_contract_call( &principal_sk, - (round - 3).into(), + round - 3, 10, CHAIN_ID_TESTNET, &to_addr(&contract_sk), @@ -337,7 +338,7 @@ fn integration_test_get_info() { if round >= 1 { let tx_xfer = make_stacks_transfer( &spender_sk, - (round - 1).into(), + round - 1, 10, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), @@ -356,16 +357,14 @@ fn integration_test_get_info() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state(|round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_addr = to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()); let contract_identifier = - QualifiedContractIdentifier::parse(&format!("{}.{}", &contract_addr, "get-info")).unwrap(); + QualifiedContractIdentifier::parse(&format!("{contract_addr}.get-info")).unwrap(); let impl_trait_contract_identifier = - QualifiedContractIdentifier::parse(&format!("{}.{}", &contract_addr, "impl-trait-contract")).unwrap(); + QualifiedContractIdentifier::parse(&format!("{contract_addr}.impl-trait-contract")).unwrap(); let http_origin 
= { HTTP_BINDING.lock().unwrap().clone().unwrap() @@ -374,7 +373,7 @@ fn integration_test_get_info() { match round { 1 => { // - Chain length should be 2. - let blocks = StacksChainState::list_blocks(&chain_state.db()).unwrap(); + let blocks = StacksChainState::list_blocks(chain_state.db()).unwrap(); assert!(chain_tip.metadata.stacks_block_height == 2); // Block #1 should have 5 txs @@ -382,14 +381,14 @@ fn integration_test_get_info() { let parent = chain_tip.block.header.parent_block; let bhh = &chain_tip.metadata.index_block_hash(); - eprintln!("Current Block: {} Parent Block: {}", bhh, parent); + eprintln!("Current Block: {bhh} Parent Block: {parent}"); let parent_val = Value::buff_from(parent.as_bytes().to_vec()).unwrap(); // find header metadata let mut headers = vec![]; for block in blocks.iter() { let header = StacksChainState::get_anchored_block_header_info(chain_state.db(), &block.0, &block.1).unwrap().unwrap(); - eprintln!("{}/{}: {:?}", &block.0, &block.1, &header); + eprintln!("{}/{}: {header:?}", &block.0, &block.1); headers.push(header); } @@ -500,13 +499,12 @@ fn integration_test_get_info() { burn_dbconn, bhh, &contract_identifier, "(exotic-data-checks u4)")); let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/map_entry/{}/{}/{}", - &http_origin, &contract_addr, "get-info", "block-data"); + let path = format!("{http_origin}/v2/map_entry/{contract_addr}/get-info/block-data"); let key: Value = TupleData::from_data(vec![("height".into(), Value::UInt(3))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() @@ -514,14 +512,14 @@ fn integration_test_get_info() { let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); let expected_data = chain_state.clarity_eval_read_only(burn_dbconn, bhh, &contract_identifier, "(some (get-exotic-data-info u3))"); - assert!(res.get("proof").is_some()); + assert!(res.contains_key("proof")); assert_eq!(result_data, expected_data); let key: Value = TupleData::from_data(vec![("height".into(), Value::UInt(100))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() @@ -532,19 +530,18 @@ fn integration_test_get_info() { let sender_addr = to_addr(&StacksPrivateKey::from_hex(SK_3).unwrap()); // now, let's use a query string to get data without a proof - let path = format!("{}/v2/map_entry/{}/{}/{}?proof=0", - &http_origin, &contract_addr, "get-info", "block-data"); + let path = format!("{http_origin}/v2/map_entry/{contract_addr}/get-info/block-data?proof=0"); let key: Value = TupleData::from_data(vec![("height".into(), Value::UInt(3))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() .unwrap().json::<HashMap<String, String>>().unwrap(); - assert!(res.get("proof").is_none()); + assert!(!res.contains_key("proof")); let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); let expected_data = chain_state.clarity_eval_read_only(burn_dbconn, bhh, &contract_identifier, "(some (get-exotic-data-info u3))"); @@ -553,19 +550,18 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // now, let's use a query string to get data _with_ a proof - let path = format!("{}/v2/map_entry/{}/{}/{}?proof=1", - &http_origin, &contract_addr, "get-info", "block-data"); + let
path = format!("{http_origin}/v2/map_entry/{contract_addr}/get-info/block-data?proof=1"); let key: Value = TupleData::from_data(vec![("height".into(), Value::UInt(3))]) .unwrap().into(); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let res = client.post(&path) .json(&key.serialize_to_hex().unwrap()) .send() .unwrap().json::<HashMap<String, String>>().unwrap(); - assert!(res.get("proof").is_some()); + assert!(res.contains_key("proof")); let result_data = Value::try_deserialize_hex_untyped(&res["data"][2..]).unwrap(); let expected_data = chain_state.clarity_eval_read_only(burn_dbconn, bhh, &contract_identifier, "(some (get-exotic-data-info u3))"); @@ -574,9 +570,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // account with a nonce entry + a balance entry - let path = format!("{}/v2/accounts/{}", - &http_origin, &sender_addr); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{sender_addr}"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::<AccountEntryResponse>().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 99860); assert_eq!(res.nonce, 4); @@ -584,9 +579,8 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // account with a nonce entry but not a balance entry - let path = format!("{}/v2/accounts/{}", - &http_origin, &contract_addr); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{contract_addr}"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::<AccountEntryResponse>().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 960); assert_eq!(res.nonce, 4); @@ -594,9 +588,8 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // account with a balance entry but not a nonce entry - let path = format!("{}/v2/accounts/{}", - &http_origin, ADDR_4); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{ADDR_4}"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::<AccountEntryResponse>().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 400); assert_eq!(res.nonce, 0); @@ -604,27 +597,24 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // account with neither!
- let path = format!("{}/v2/accounts/{}.get-info", - &http_origin, &contract_addr); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{contract_addr}.get-info"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 0); assert_eq!(res.nonce, 0); assert!(res.nonce_proof.is_some()); assert!(res.balance_proof.is_some()); - let path = format!("{}/v2/accounts/{}?proof=0", - &http_origin, ADDR_4); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{ADDR_4}?proof=0"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 400); assert_eq!(res.nonce, 0); assert!(res.nonce_proof.is_none()); assert!(res.balance_proof.is_none()); - let path = format!("{}/v2/accounts/{}?proof=1", - &http_origin, ADDR_4); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/accounts/{ADDR_4}?proof=1"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 400); assert_eq!(res.nonce, 0); @@ -632,15 +622,15 @@ fn integration_test_get_info() { assert!(res.balance_proof.is_some()); // let's try getting the transfer cost - let path = format!("{}/v2/fees/transfer", &http_origin); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/fees/transfer"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert!(res > 0); // let's get a contract ABI - let path = format!("{}/v2/contracts/interface/{}/{}", &http_origin, &contract_addr, "get-info"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/interface/{contract_addr}/get-info"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); let contract_analysis = mem_type_check(GET_INFO_CONTRACT, ClarityVersion::Clarity2, StacksEpochId::Epoch21).unwrap().1; @@ -652,14 +642,14 @@ fn integration_test_get_info() { // a missing one? - let path = format!("{}/v2/contracts/interface/{}/{}", &http_origin, &contract_addr, "not-there"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/interface/{contract_addr}/not-there"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // let's get a contract SRC - let path = format!("{}/v2/contracts/source/{}/{}", &http_origin, &contract_addr, "get-info"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/source/{contract_addr}/get-info"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(res.source, GET_INFO_CONTRACT); @@ -667,8 +657,8 @@ fn integration_test_get_info() { assert!(res.marf_proof.is_some()); - let path = format!("{}/v2/contracts/source/{}/{}?proof=0", &http_origin, &contract_addr, "get-info"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/source/{contract_addr}/get-info?proof=0"); + eprintln!("Test: GET {path}"); let res = client.get(&path).send().unwrap().json::().unwrap(); assert_eq!(res.source, GET_INFO_CONTRACT); @@ -677,14 +667,14 @@ fn integration_test_get_info() { // a missing one? 
- let path = format!("{}/v2/contracts/source/{}/{}", &http_origin, &contract_addr, "not-there"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/contracts/source/{contract_addr}/not-there"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // how about a read-only function call! - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", "get-exotic-data-info"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/get-exotic-data-info"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -705,8 +695,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // how about a non read-only function call which does not modify anything - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "main", "public-no-write"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/main/public-no-write"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -732,8 +722,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // how about a non read-only function call which does modify something and should fail - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "main", "public-write"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/main/public-write"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -750,9 +740,8 @@ fn integration_test_get_info() { assert!(res["cause"].as_str().unwrap().contains("NotReadOnly")); // let's try a call with a url-encoded string. - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", - "get-exotic-data-info%3F"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/get-exotic-data-info%3F"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -774,8 +763,8 @@ fn integration_test_get_info() { assert_eq!(result_data, expected_data); // let's have a runtime error! - let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", "get-exotic-data-info"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/get-exotic-data-info"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -793,8 +782,8 @@ fn integration_test_get_info() { assert!(res["cause"].as_str().unwrap().contains("UnwrapFailure")); // let's have a runtime error! 
- let path = format!("{}/v2/contracts/call-read/{}/{}/{}", &http_origin, &contract_addr, "get-info", "update-info"); - eprintln!("Test: POST {}", path); + let path = format!("{http_origin}/v2/contracts/call-read/{contract_addr}/get-info/update-info"); + eprintln!("Test: POST {path}"); let body = CallReadOnlyRequestBody { sender: "'SP139Q3N9RXCJCD1XVA4N5RYWQ5K9XQ0T9PKQ8EE5".into(), @@ -814,13 +803,13 @@ fn integration_test_get_info() { // let's submit a valid transaction! let spender_sk = StacksPrivateKey::from_hex(SK_3).unwrap(); - let path = format!("{}/v2/transactions", &http_origin); - eprintln!("Test: POST {} (valid)", path); + let path = format!("{http_origin}/v2/transactions"); + eprintln!("Test: POST {path} (valid)"); // tx_xfer is 180 bytes long let tx_xfer = make_stacks_transfer( &spender_sk, - round.into(), + round, 200, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), @@ -846,17 +835,17 @@ fn integration_test_get_info() { .send() .unwrap().json::().unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!(res.get("error").unwrap().as_str().unwrap(), "transaction rejected"); assert!(res.get("reason").is_some()); // let's submit an invalid transaction! - let path = format!("{}/v2/transactions", &http_origin); - eprintln!("Test: POST {} (invalid)", path); + let path = format!("{http_origin}/v2/transactions"); + eprintln!("Test: POST {path} (invalid)"); // tx_xfer_invalid is 180 bytes long // bad nonce - let tx_xfer_invalid = make_stacks_transfer(&spender_sk, (round + 30).into(), 200, CHAIN_ID_TESTNET, + let tx_xfer_invalid = make_stacks_transfer(&spender_sk, round + 30, 200, CHAIN_ID_TESTNET, &StacksAddress::from_string(ADDR_4).unwrap().into(), 456); let tx_xfer_invalid_tx = StacksTransaction::consensus_deserialize(&mut &tx_xfer_invalid[..]).unwrap(); @@ -869,39 +858,39 @@ fn integration_test_get_info() { .json::() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!(res.get("txid").unwrap().as_str().unwrap(), format!("{}", tx_xfer_invalid_tx.txid())); assert_eq!(res.get("error").unwrap().as_str().unwrap(), "transaction rejected"); assert!(res.get("reason").is_some()); // testing /v2/trait// // trait does not exist - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "get-info", &contract_addr, "get-info", "dummy-trait"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/traits/{contract_addr}/get-info/{contract_addr}/get-info/dummy-trait"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // explicit trait compliance - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-1"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(res.is_implemented); // No trait found - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-4"); - eprintln!("Test: GET {}", path); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-4"); + eprintln!("Test: GET {path}"); assert_eq!(client.get(&path).send().unwrap().status(), 404); // implicit trait compliance - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, 
&contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-2"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-2"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(res.is_implemented); // invalid trait compliance - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-3"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-3"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(!res.is_implemented); // test query parameters for v2/trait endpoint @@ -911,33 +900,33 @@ fn integration_test_get_info() { let tmppath = "/tmp/integration_test_get_info-old-tip"; use std::fs; use std::io::Read; - let mut f = fs::File::open(&tmppath).unwrap(); + let mut f = fs::File::open(tmppath).unwrap(); let mut buf = vec![]; f.read_to_end(&mut buf).unwrap(); let old_tip = StacksBlockId::consensus_deserialize(&mut &buf[..]).unwrap(); - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}?tip={}", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1", &old_tip); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-1?tip={old_tip}"); let res = client.get(&path).send().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert_eq!(res.text().unwrap(), "No contract analysis found or trait definition not found"); // evaluate check for explicit compliance where tip is the chain tip of the first block (contract DNE at that block), but tip is "latest" - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}?tip=latest", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1"); + let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-1?tip=latest"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(res.is_implemented); // perform some tests of the fee rate interface - let path = format!("{}/v2/fees/transaction", &http_origin); + let path = format!("{http_origin}/v2/fees/transaction"); let tx_payload = - TransactionPayload::TokenTransfer(contract_addr.clone().into(), 10_000_000, TokenTransferMemo([0; 34])); + TransactionPayload::TokenTransfer(contract_addr.into(), 10_000_000, TokenTransferMemo([0; 34])); let payload_data = tx_payload.serialize_to_vec(); let payload_hex = format!("0x{}", to_hex(&payload_data)); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let body = json!({ "transaction_payload": payload_hex.clone() }); @@ -948,7 +937,7 @@ fn integration_test_get_info() { .json::() .expect("Failed to parse result into JSON"); - eprintln!("{}", res); + eprintln!("{res}"); // destruct the json result // estimated_cost for transfers should be 0 -- their cost is just in their length @@ -975,11 +964,11 @@ fn integration_test_get_info() { .map(|x| x.get("fee").expect("Should have fee field")) .collect(); - assert!(estimated_fee_rates.len() == 3, "Fee rates should be length 3 array"); - assert!(estimated_fees.len() == 3, "Fees should be length 3 array"); + assert_eq!(estimated_fee_rates.len(), 3, "Fee rates should be 
length 3 array"); + assert_eq!(estimated_fees.len(), 3, "Fees should be length 3 array"); let tx_payload = TransactionPayload::from(TransactionContractCall { - address: contract_addr.clone(), + address: contract_addr, contract_name: "get-info".into(), function_name: "update-info".into(), function_args: vec![], @@ -988,7 +977,7 @@ fn integration_test_get_info() { let payload_data = tx_payload.serialize_to_vec(); let payload_hex = to_hex(&payload_data); - eprintln!("Test: POST {}", path); + eprintln!("Test: POST {path}"); let body = json!({ "transaction_payload": payload_hex.clone() }); @@ -999,7 +988,7 @@ fn integration_test_get_info() { .json::() .expect("Failed to parse result into JSON"); - eprintln!("{}", res); + eprintln!("{res}"); // destruct the json result // estimated_cost for transfers should be non-zero @@ -1026,11 +1015,11 @@ fn integration_test_get_info() { .map(|x| x.get("fee").expect("Should have fee field")) .collect(); - assert!(estimated_fee_rates.len() == 3, "Fee rates should be length 3 array"); - assert!(estimated_fees.len() == 3, "Fees should be length 3 array"); + assert_eq!(estimated_fee_rates.len(), 3, "Fee rates should be length 3 array"); + assert_eq!(estimated_fees.len(), 3, "Fees should be length 3 array"); let tx_payload = TransactionPayload::from(TransactionContractCall { - address: contract_addr.clone(), + address: contract_addr, contract_name: "get-info".into(), function_name: "update-info".into(), function_args: vec![], @@ -1041,7 +1030,7 @@ fn integration_test_get_info() { let estimated_len = 1550; let body = json!({ "transaction_payload": payload_hex.clone(), "estimated_len": estimated_len }); - info!("POST body\n {}", body); + info!("POST body\n {body}"); let res = client.post(&path) .json(&body) @@ -1050,7 +1039,7 @@ fn integration_test_get_info() { .json::() .expect("Failed to parse result into JSON"); - info!("{}", res); + info!("{res}"); // destruct the json result // estimated_cost for transfers should be non-zero @@ -1094,7 +1083,7 @@ fn integration_test_get_info() { run_loop.start(num_rounds).unwrap(); } -const FAUCET_CONTRACT: &'static str = " +const FAUCET_CONTRACT: &str = " (define-public (spout) (let ((recipient tx-sender)) (print (as-contract (stx-transfer? 
u1 .faucet recipient))))) @@ -1111,7 +1100,7 @@ fn contract_stx_transfer() { conf.burnchain.commit_anchor_block_within = 5000; conf.add_initial_balance(addr_3.to_string(), 100000); conf.add_initial_balance( - to_addr(&StacksPrivateKey::from_hex(&SK_2).unwrap()).to_string(), + to_addr(&StacksPrivateKey::from_hex(SK_2).unwrap()).to_string(), 1000, ); conf.add_initial_balance(to_addr(&contract_sk).to_string(), 1000); @@ -1133,9 +1122,8 @@ fn contract_stx_transfer() { let consensus_hash = chain_tip.metadata.consensus_hash; let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1226,7 +1214,7 @@ fn contract_stx_transfer() { .submit_raw( &mut chainstate_copy, &sortdb, - &consensus_hash, + consensus_hash, &header_hash, tx, &ExecutionCost::max_value(), @@ -1287,30 +1275,27 @@ fn contract_stx_transfer() { .unwrap_err() { MemPoolRejection::ConflictingNonceInMempool => (), - e => panic!("{:?}", e), + e => panic!("{e:?}"), }; } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); match round { 1 => { - assert!(chain_tip.metadata.stacks_block_height == 2); + assert_eq!(chain_tip.metadata.stacks_block_height, 2); // Block #1 should have 2 txs -- coinbase + transfer assert_eq!(chain_tip.block.txs.len(), 2); let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); // check that 1000 stx _was_ transfered to the contract principal @@ -1353,19 +1338,19 @@ fn contract_stx_transfer() { ); } 2 => { - assert!(chain_tip.metadata.stacks_block_height == 3); + assert_eq!(chain_tip.metadata.stacks_block_height, 3); // Block #2 should have 2 txs -- coinbase + publish assert_eq!(chain_tip.block.txs.len(), 2); } 3 => { - assert!(chain_tip.metadata.stacks_block_height == 4); + assert_eq!(chain_tip.metadata.stacks_block_height, 4); // Block #3 should have 2 txs -- coinbase + contract-call, // the second publish _should have been rejected_ assert_eq!(chain_tip.block.txs.len(), 2); // check that 1 stx was transfered to SK_2 via the contract-call let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); @@ -1408,7 +1393,7 @@ fn contract_stx_transfer() { ); } 4 => { - assert!(chain_tip.metadata.stacks_block_height == 5); + assert_eq!(chain_tip.metadata.stacks_block_height, 5); assert_eq!( chain_tip.block.txs.len() as u64, MAXIMUM_MEMPOOL_TX_CHAINING + 1, @@ -1416,7 +1401,7 @@ fn contract_stx_transfer() { ); let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); @@ -1491,9 +1476,8 @@ fn mine_transactions_out_of_order() { let consensus_hash = chain_tip.metadata.consensus_hash; let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1578,16 +1562,13 @@ fn 
mine_transactions_out_of_order() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1610,7 +1591,7 @@ fn mine_transactions_out_of_order() { // check that 1000 stx _was_ transfered to the contract principal let curr_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); assert_eq!( @@ -1698,15 +1679,14 @@ fn mine_contract_twice() { run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); if round == 2 { let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); // check that the contract published! @@ -1761,9 +1741,8 @@ fn bad_contract_tx_rollback() { let addr_2 = to_addr(&sk_2); let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1872,16 +1851,13 @@ fn bad_contract_tx_rollback() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _burnchain_tip, chain_tip, chain_state, burn_dbconn| { let contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()).to_string(), - "faucet" + "{}.faucet", + to_addr(&StacksPrivateKey::from_hex(SK_1).unwrap()) )) .unwrap(); @@ -1892,7 +1868,7 @@ fn bad_contract_tx_rollback() { assert_eq!(chain_tip.block.txs.len(), 2); let cur_tip = ( - chain_tip.metadata.consensus_hash.clone(), + chain_tip.metadata.consensus_hash, chain_tip.metadata.anchored_header.block_hash(), ); // check that 1000 stx _was_ transfered to the contract principal @@ -1967,10 +1943,8 @@ fn make_expensive_contract(inner_loop: &str, other_decl: &str) -> String { for i in 0..10 { contract.push('\n'); contract.push_str(&format!( - "(define-constant list-{} (concat list-{} list-{}))", + "(define-constant list-{} (concat list-{i} list-{i}))", i + 1, - i, - i )); } @@ -2013,7 +1987,7 @@ fn make_keys(seed: &str, count: u64) -> Vec { fn block_limit_runtime_test() { let mut conf = super::new_test_conf(); - conf.burnchain.epochs = Some(vec![ + conf.burnchain.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -2074,7 +2048,7 @@ fn block_limit_runtime_test() { }, network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]); + ])); conf.burnchain.commit_anchor_block_within = 5000; let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); @@ -2083,7 +2057,7 @@ fn block_limit_runtime_test() { let seed = "a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447"; let spender_sks = make_keys(seed, 500); for sk in spender_sks.iter() { - conf.add_initial_balance(to_addr(&sk).to_string(), 1000); + conf.add_initial_balance(to_addr(sk).to_string(), 1000); } let num_rounds = 6; @@ 
-2097,9 +2071,8 @@ fn block_limit_runtime_test() { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let _contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&contract_sk), - "hello-contract" + "{}.hello-contract", + to_addr(&contract_sk) )) .unwrap(); let (consensus_hash, block_hash) = ( @@ -2107,45 +2080,15 @@ fn block_limit_runtime_test() { &tenure.parent_block.metadata.anchored_header.block_hash(), ); - if round == 1 { - let publish_tx = make_contract_publish( - &contract_sk, - 0, - 10, - CHAIN_ID_TESTNET, - "hello-contract", - EXPENSIVE_CONTRACT.as_str(), - ); - tenure - .mem_pool - .submit_raw( - &mut chainstate_copy, - &sortdb, - consensus_hash, - block_hash, - publish_tx, - &ExecutionCost::max_value(), - &StacksEpochId::Epoch21, - ) - .unwrap(); - } else if round > 1 { - eprintln!("Begin Round: {}", round); - let to_submit = 2 * (round - 1); - - let seed = "a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447"; - let spender_sks = make_keys(seed, 500); - - for i in 0..to_submit { - let sk = &spender_sks[(i + round * round) as usize]; - let tx = make_contract_call( - sk, + match round.cmp(&1) { + Ordering::Equal => { + let publish_tx = make_contract_publish( + &contract_sk, 0, 10, CHAIN_ID_TESTNET, - &to_addr(&contract_sk), "hello-contract", - "do-it", - &[], + EXPENSIVE_CONTRACT.as_str(), ); tenure .mem_pool @@ -2154,24 +2097,55 @@ fn block_limit_runtime_test() { &sortdb, consensus_hash, block_hash, - tx, + publish_tx, &ExecutionCost::max_value(), &StacksEpochId::Epoch21, ) .unwrap(); } - } - - return; + Ordering::Greater => { + eprintln!("Begin Round: {round}"); + let to_submit = 2 * (round - 1); + + let seed = "a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447"; + let spender_sks = make_keys(seed, 500); + + for i in 0..to_submit { + let sk = &spender_sks[(i + round * round) as usize]; + let tx = make_contract_call( + sk, + 0, + 10, + CHAIN_ID_TESTNET, + &to_addr(&contract_sk), + "hello-contract", + "do-it", + &[], + ); + tenure + .mem_pool + .submit_raw( + &mut chainstate_copy, + &sortdb, + consensus_hash, + block_hash, + tx, + &ExecutionCost::max_value(), + &StacksEpochId::Epoch21, + ) + .unwrap(); + } + } + Ordering::Less => {} + }; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _chain_state, block, _chain_tip_info, _burn_dbconn| { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let _contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&contract_sk), - "hello-contract" + "{}.hello-contract", + to_addr(&contract_sk) )) .unwrap(); @@ -2180,7 +2154,7 @@ fn block_limit_runtime_test() { // Block #1 should have 3 txs -- coinbase + 2 contract calls... assert_eq!(block.block.txs.len(), 3); } - 3 | 4 | 5 => { + 3..=5 => { // Block >= 2 should have 4 txs -- coinbase + 3 contract calls // because the _subsequent_ transactions should never have been // included. 
@@ -2215,7 +2189,7 @@ fn mempool_errors() { { let mut http_opt = HTTP_BINDING.lock().unwrap(); - http_opt.replace(format!("http://{}", &rpc_bind)); + http_opt.replace(format!("http://{rpc_bind}")); } let mut run_loop = RunLoop::new(conf); @@ -2254,22 +2228,19 @@ fn mempool_errors() { ) .unwrap(); } - - return; }); run_loop.callbacks.on_new_stacks_chain_state( |round, _chain_state, _block, _chain_tip_info, _burn_dbconn| { let contract_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let _contract_identifier = QualifiedContractIdentifier::parse(&format!( - "{}.{}", - to_addr(&contract_sk), - "hello-contract" + "{}.hello-contract", + to_addr(&contract_sk) )) .unwrap(); let http_origin = { HTTP_BINDING.lock().unwrap().clone().unwrap() }; let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let spender_sk = StacksPrivateKey::from_hex(SK_3).unwrap(); let spender_addr = to_addr(&spender_sk); @@ -2277,7 +2248,7 @@ fn mempool_errors() { if round == 1 { // let's submit an invalid transaction! - eprintln!("Test: POST {} (invalid)", path); + eprintln!("Test: POST {path} (invalid)"); let tx_xfer_invalid = make_stacks_transfer( &spender_sk, 30, // bad nonce -- too much chaining @@ -2298,7 +2269,7 @@ fn mempool_errors() { .json::<serde_json::Value>() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() @@ -2312,7 +2283,7 @@ fn mempool_errors() { "TooMuchChaining" ); let data = res.get("reason_data").unwrap(); - assert_eq!(data.get("is_origin").unwrap().as_bool().unwrap(), true); + assert!(data.get("is_origin").unwrap().as_bool().unwrap()); assert_eq!( data.get("principal").unwrap().as_str().unwrap(), &spender_addr.to_string() @@ -2340,7 +2311,7 @@ fn mempool_errors() { .json::<serde_json::Value>() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() @@ -2374,7 +2345,7 @@ fn mempool_errors() { .json::<serde_json::Value>() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() @@ -2419,7 +2390,7 @@ fn mempool_errors() { .json::<serde_json::Value>() .unwrap(); - eprintln!("{}", res); + eprintln!("{res}"); assert_eq!( res.get("txid").unwrap().as_str().unwrap(), tx_xfer_invalid_tx.txid().to_string() diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index b701e70a15..58a526ba30 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -31,13 +31,13 @@ use super::{ use crate::helium::RunLoop; use crate::Keychain; -const FOO_CONTRACT: &'static str = "(define-public (foo) (ok 1)) +const FOO_CONTRACT: &str = "(define-public (foo) (ok 1)) (define-public (bar (x uint)) (ok x))"; -const TRAIT_CONTRACT: &'static str = "(define-trait tr ((value () (response uint uint))))"; -const USE_TRAIT_CONTRACT: &'static str = "(use-trait tr-trait .trait-contract.tr) +const TRAIT_CONTRACT: &str = "(define-trait tr ((value () (response uint uint))))"; +const USE_TRAIT_CONTRACT: &str = "(use-trait tr-trait .trait-contract.tr) (define-public (baz (abc <tr-trait>)) (ok (contract-of abc)))"; -const IMPLEMENT_TRAIT_CONTRACT: &'static str = "(define-public (value) (ok u1))"; -const BAD_TRAIT_CONTRACT: &'static str = "(define-public (foo-bar) (ok u1))"; +const IMPLEMENT_TRAIT_CONTRACT: &str = "(define-public (value) (ok u1))"; +const
BAD_TRAIT_CONTRACT: &str = "(define-public (foo-bar) (ok u1))"; pub fn make_bad_stacks_transfer( sender: &StacksPrivateKey, @@ -318,17 +318,13 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!( - if let MemPoolRejection::FailedToValidate(ChainstateError::NetError( - NetError::VerifyingError(_), - )) = e - { - true - } else { - false - } - ); + eprintln!("Err: {e:?}"); + assert!(matches!( + e, + MemPoolRejection::FailedToValidate(ChainstateError::NetError( + NetError::VerifyingError(_) + )) + )); // mismatched network on contract-call! let bad_addr = StacksAddress::from_public_keys( @@ -337,8 +333,7 @@ fn mempool_setup_chainstate() { 1, &vec![StacksPublicKey::from_private(&other_sk)], ) - .unwrap() - .into(); + .unwrap(); let tx_bytes = make_contract_call( &contract_sk, @@ -362,11 +357,7 @@ fn mempool_setup_chainstate() { ) .unwrap_err(); - assert!(if let MemPoolRejection::BadAddressVersionByte = e { - true - } else { - false - }); + assert!(matches!(e, MemPoolRejection::BadAddressVersionByte)); // mismatched network on transfer! let bad_addr = StacksAddress::from_public_keys( @@ -391,11 +382,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - assert!(if let MemPoolRejection::BadAddressVersionByte = e { - true - } else { - false - }); + assert!(matches!(e, MemPoolRejection::BadAddressVersionByte)); // bad fees let tx_bytes = @@ -411,12 +398,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::FeeTooLow(0, _) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::FeeTooLow(0, _))); // bad nonce let tx_bytes = @@ -432,12 +415,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::BadNonces(_) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadNonces(_))); // not enough funds let tx_bytes = make_stacks_transfer( @@ -459,15 +438,11 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NotEnoughFunds(111000, 99500) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NotEnoughFunds(111000, 99500))); // sender == recipient - let contract_princ = PrincipalData::from(contract_addr.clone()); + let contract_princ = PrincipalData::from(contract_addr); let tx_bytes = make_stacks_transfer( &contract_sk, 5, @@ -487,7 +462,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(if let MemPoolRejection::TransferRecipientIsSender(r) = e { r == contract_princ } else { @@ -517,15 +492,11 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::BadAddressVersionByte = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadAddressVersionByte)); // tx version must be testnet - let contract_princ = PrincipalData::from(contract_addr.clone()); + let contract_princ = PrincipalData::from(contract_addr); let payload = TransactionPayload::TokenTransfer( contract_princ.clone(), 1000, @@ -551,12 +522,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - 
assert!(if let MemPoolRejection::BadTransactionVersion = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadTransactionVersion)); // send amount must be positive let tx_bytes = @@ -572,12 +539,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::TransferAmountMustBePositive = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::TransferAmountMustBePositive)); // not enough funds let tx_bytes = make_stacks_transfer( @@ -599,12 +562,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NotEnoughFunds(111000, 99500) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NotEnoughFunds(111000, 99500))); let tx_bytes = make_stacks_transfer( &contract_sk, @@ -625,12 +584,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NotEnoughFunds(100700, 99500) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NotEnoughFunds(100700, 99500))); let tx_bytes = make_contract_call( &contract_sk, @@ -653,12 +608,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NoSuchContract = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NoSuchContract)); let tx_bytes = make_contract_call( &contract_sk, @@ -681,12 +632,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NoSuchPublicFunction = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NoSuchPublicFunction)); let tx_bytes = make_contract_call( &contract_sk, @@ -709,12 +656,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::BadFunctionArgument(_) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::BadFunctionArgument(_))); let tx_bytes = make_contract_publish( &contract_sk, @@ -735,12 +678,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::ContractAlreadyExists(_) = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::ContractAlreadyExists(_))); let microblock_1 = StacksMicroblockHeader { version: 0, @@ -777,13 +716,13 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(matches!(e, MemPoolRejection::Other(_))); let microblock_1 = StacksMicroblockHeader { version: 0, sequence: 0, - prev_block: block_hash.clone(), + prev_block: *block_hash, tx_merkle_root: Sha512Trunc256Sum::from_data(&[]), signature: MessageSignature([0; 65]), }; @@ -791,7 +730,7 @@ fn mempool_setup_chainstate() { let microblock_2 = StacksMicroblockHeader { version: 0, sequence: 0, - prev_block: block_hash.clone(), + prev_block: *block_hash, tx_merkle_root: Sha512Trunc256Sum::from_data(&[1, 2, 3]), signature: MessageSignature([0; 65]), }; @@ -815,7 +754,7 @@ fn mempool_setup_chainstate() { 
tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(matches!(e, MemPoolRejection::Other(_))); let mut microblock_1 = StacksMicroblockHeader { @@ -856,7 +795,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(matches!(e, MemPoolRejection::Other(_))); let tx_bytes = make_coinbase(&contract_sk, 5, 1000, CHAIN_ID_TESTNET); @@ -871,12 +810,8 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); - assert!(if let MemPoolRejection::NoCoinbaseViaMempool = e { - true - } else { - false - }); + eprintln!("Err: {e:?}"); + assert!(matches!(e, MemPoolRejection::NoCoinbaseViaMempool)); // find the correct priv-key let mut secret_key = None; @@ -936,12 +871,12 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - eprintln!("Err: {:?}", e); + eprintln!("Err: {e:?}"); assert!(matches!(e, MemPoolRejection::Other(_))); let contract_id = QualifiedContractIdentifier::new( - StandardPrincipalData::from(contract_addr.clone()), - ContractName::try_from("implement-trait-contract").unwrap(), + StandardPrincipalData::from(contract_addr), + ContractName::from("implement-trait-contract"), ); let contract_principal = PrincipalData::Contract(contract_id.clone()); @@ -968,8 +903,8 @@ fn mempool_setup_chainstate() { .unwrap(); let contract_id = QualifiedContractIdentifier::new( - StandardPrincipalData::from(contract_addr.clone()), - ContractName::try_from("bad-trait-contract").unwrap(), + StandardPrincipalData::from(contract_addr), + ContractName::from("bad-trait-contract"), ); let contract_principal = PrincipalData::Contract(contract_id.clone()); @@ -994,11 +929,7 @@ fn mempool_setup_chainstate() { tx_bytes.len() as u64, ) .unwrap_err(); - assert!(if let MemPoolRejection::BadFunctionArgument(_) = e { - true - } else { - false - }); + assert!(matches!(e, MemPoolRejection::BadFunctionArgument(_))); } }, ); diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index 2c555e7232..6f02ecf138 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -81,11 +81,11 @@ pub const STORE_CONTRACT: &str = r#"(define-map store { key: (string-ascii 32) } (ok true)))"#; // ./blockstack-cli --testnet publish 043ff5004e3d695060fa48ac94c96049b8c14ef441c50a184a6a3875d2a000f3 0 0 store /tmp/out.clar -pub const SK_1: &'static str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; -pub const SK_2: &'static str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; -pub const SK_3: &'static str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; +pub const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; +pub const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; +pub const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; -pub const ADDR_4: &'static str = "ST31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZZ239N96"; +pub const ADDR_4: &str = "ST31DA6FTSJX2WGTZ69SFY11BH51NZMB0ZZ239N96"; lazy_static! 
{ pub static ref PUBLISH_CONTRACT: Vec<u8> = make_contract_publish( @@ -133,6 +133,7 @@ pub fn insert_new_port(port: u16) -> bool { ports.insert(port) } +#[allow(clippy::too_many_arguments)] pub fn serialize_sign_sponsored_sig_tx_anchor_mode_version( payload: TransactionPayload, sender: &StacksPrivateKey, @@ -215,6 +216,7 @@ pub fn serialize_sign_standard_single_sig_tx_anchor_mode_version( ) } +#[allow(clippy::too_many_arguments)] pub fn serialize_sign_tx_anchor_mode_version( payload: TransactionPayload, sender: &StacksPrivateKey, @@ -401,10 +403,10 @@ pub fn set_random_binds(config: &mut Config) { let rpc_port = gen_random_port(); let p2p_port = gen_random_port(); let localhost = "127.0.0.1"; - config.node.rpc_bind = format!("{}:{}", localhost, rpc_port); - config.node.p2p_bind = format!("{}:{}", localhost, p2p_port); - config.node.data_url = format!("http://{}:{}", localhost, rpc_port); - config.node.p2p_address = format!("{}:{}", localhost, p2p_port); + config.node.rpc_bind = format!("{localhost}:{rpc_port}"); + config.node.p2p_bind = format!("{localhost}:{p2p_port}"); + config.node.data_url = format!("http://{localhost}:{rpc_port}"); + config.node.p2p_address = format!("{localhost}:{p2p_port}"); } pub fn to_addr(sk: &StacksPrivateKey) -> StacksAddress { @@ -427,9 +429,10 @@ pub fn make_stacks_transfer( ) -> Vec<u8> { let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) + serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) } +#[allow(clippy::too_many_arguments)] pub fn make_sponsored_stacks_transfer_on_testnet( sender: &StacksPrivateKey, payer: &StacksPrivateKey, @@ -443,7 +446,7 @@ pub fn make_sponsored_stacks_transfer_on_testnet( let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); serialize_sign_sponsored_sig_tx_anchor_mode_version( - payload.into(), + payload, sender, payer, sender_nonce, @@ -466,7 +469,7 @@ pub fn make_stacks_transfer_mblock_only( let payload = TransactionPayload::TokenTransfer(recipient.clone(), amount, TokenTransferMemo([0; 34])); serialize_sign_standard_single_sig_tx_anchor_mode( - payload.into(), + payload, sender, nonce, tx_fee, @@ -484,14 +487,15 @@ pub fn make_poison( header_2: StacksMicroblockHeader, ) -> Vec<u8> { let payload = TransactionPayload::PoisonMicroblock(header_1, header_2); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) + serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) } pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64, chain_id: u32) -> Vec<u8> { let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None); - serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) + serialize_sign_standard_single_sig_tx(payload, sender, nonce, tx_fee, chain_id) } +#[allow(clippy::too_many_arguments)] pub fn make_contract_call( sender: &StacksPrivateKey, nonce: u64, @@ -506,15 +510,16 @@ pub fn make_contract_call( let function_name = ClarityName::from(function_name); let payload = TransactionContractCall { - address: contract_addr.clone(), + address: *contract_addr, contract_name, function_name, - function_args: function_args.iter().map(|x| x.clone()).collect(), + function_args: function_args.to_vec(), }; serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee, chain_id) }
+#[allow(clippy::too_many_arguments)] pub fn make_contract_call_mblock_only( sender: &StacksPrivateKey, nonce: u64, @@ -529,10 +534,10 @@ pub fn make_contract_call_mblock_only( let function_name = ClarityName::from(function_name); let payload = TransactionContractCall { - address: contract_addr.clone(), + address: *contract_addr, contract_name, function_name, - function_args: function_args.iter().map(|x| x.clone()).collect(), + function_args: function_args.to_vec(), }; serialize_sign_standard_single_sig_tx_anchor_mode( @@ -558,7 +563,7 @@ fn make_microblock( let mut microblock_builder = StacksMicroblockBuilder::new( block.block_hash(), - consensus_hash.clone(), + consensus_hash, chainstate, burn_dbconn, BlockBuilderSettings::max_value(), @@ -576,10 +581,9 @@ fn make_microblock( // NOTE: we intentionally do not check the block's microblock pubkey hash against the private // key, because we may need to test that microblocks get rejected due to bad signatures. - let microblock = microblock_builder + microblock_builder .mine_next_microblock_from_txs(mempool_txs, privk) - .unwrap(); - microblock + .unwrap() } /// Deserializes the `StacksTransaction` objects from `blocks` and returns all those that @@ -601,7 +605,7 @@ pub fn select_transactions_where( } } - return result; + result } /// This function will call `next_block_and_wait` until the burnchain height underlying `BitcoinRegtestController` @@ -614,20 +618,19 @@ pub fn run_until_burnchain_height( target_height: u64, conf: &Config, ) -> bool { - let tip_info = get_chain_info(&conf); + let tip_info = get_chain_info(conf); let mut current_height = tip_info.burn_block_height; while current_height < target_height { eprintln!( - "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({})", - get_epoch_time_secs(), - current_height + "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({current_height})", + get_epoch_time_secs() ); - let next_result = next_block_and_wait(btc_regtest_controller, &blocks_processed); + let next_result = next_block_and_wait(btc_regtest_controller, blocks_processed); if !next_result { return false; } - let tip_info = get_chain_info(&conf); + let tip_info = get_chain_info(conf); current_height = tip_info.burn_block_height; } @@ -717,7 +720,6 @@ fn should_succeed_mining_valid_txs() { }, _ => {} }; - return }); // Use block's hook for asserting expectations @@ -743,18 +745,18 @@ fn should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); // Transaction #2 should be the smart contract published let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::SmartContract(..) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::SmartContract(..) + )); // 0 event should have been produced let events: Vec = chain_tip @@ -762,7 +764,7 @@ fn should_succeed_mining_valid_txs() { .iter() .flat_map(|a| a.events.clone()) .collect(); - assert!(events.len() == 0); + assert!(events.is_empty()); } 2 => { // Inspecting the chain at round 2. 
@@ -775,18 +777,18 @@ fn should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); // Transaction #2 should be the get-value contract-call let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::ContractCall(_) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::ContractCall(_) + )); // 2 lockup events should have been produced let events: Vec<StacksTransactionEvent> = chain_tip @@ -807,18 +809,18 @@ fn should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); // Transaction #2 should be the set-value contract-call let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::ContractCall(_) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::ContractCall(_) + )); // 2 lockup events + 1 contract event should have been produced let events: Vec<StacksTransactionEvent> = chain_tip @@ -832,7 +834,7 @@ fn should_succeed_mining_valid_txs() { format!("{}", data.key.0) == "STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A.store" && data.key.1 == "print" - && format!("{}", data.value) == "\"Setting key foo\"".to_string() + && format!("{}", data.value) == "\"Setting key foo\"" } _ => false, }); @@ -848,18 +850,18 @@ fn should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); // Transaction #2 should be the get-value contract-call let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::ContractCall(_) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::ContractCall(_) + )); // 1 event should have been produced let events: Vec<StacksTransactionEvent> = chain_tip @@ -873,7 +875,7 @@ fn should_succeed_mining_valid_txs() { format!("{}", data.key.0) == "STGT7GSMZG7EA0TS6MVSKT5JC1DCDFGZWJJZXN8A.store" && data.key.1 == "print" - && format!("{}", data.value) == "\"Getting key foo\"".to_string() + && format!("{}", data.value) == "\"Getting key foo\"" } _ => false, }); @@ -889,19 +891,19 @@ fn should_succeed_mining_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..)
+ )); // Transaction #2 should be the STX transfer let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::TokenTransfer(_, _, _) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::TokenTransfer(_, _, _) + )); // 1 event should have been produced let events: Vec = chain_tip @@ -996,7 +998,6 @@ fn should_succeed_handling_malformed_and_valid_txs() { }, _ => {} }; - return }); // Use block's hook for asserting expectations @@ -1014,10 +1015,10 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); } 1 => { // Inspecting the chain at round 1. @@ -1030,18 +1031,18 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); // Transaction #2 should be the smart contract published let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::SmartContract(..) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::SmartContract(..) + )); } 2 => { // Inspecting the chain at round 2. @@ -1054,10 +1055,10 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); } 3 => { // Inspecting the chain at round 3. @@ -1070,10 +1071,10 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) + )); } 4 => { // Inspecting the chain at round 4. @@ -1086,18 +1087,18 @@ fn should_succeed_handling_malformed_and_valid_txs() { // Transaction #1 should be the coinbase from the leader let coinbase_tx = &chain_tip.block.txs[0]; assert!(coinbase_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match coinbase_tx.payload { - TransactionPayload::Coinbase(..) => true, - _ => false, - }); + assert!(matches!( + coinbase_tx.payload, + TransactionPayload::Coinbase(..) 
+ )); // Transaction #2 should be the contract-call let contract_tx = &chain_tip.block.txs[1]; assert!(contract_tx.chain_id == CHAIN_ID_TESTNET); - assert!(match contract_tx.payload { - TransactionPayload::ContractCall(_) => true, - _ => false, - }); + assert!(matches!( + contract_tx.payload, + TransactionPayload::ContractCall(_) + )); } _ => {} } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 90334cce9b..ef6199d331 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -37,8 +37,9 @@ use stacks::chainstate::burn::operations::{ }; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::coordinator::OnChainRewardSetProvider; -use stacks::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; +use stacks::chainstate::nakamoto::coordinator::{load_nakamoto_reward_set, TEST_COORDINATOR_STALL}; use stacks::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use stacks::chainstate::nakamoto::shadow::shadow_chainstate_repair; use stacks::chainstate::nakamoto::test_signers::TestSigners; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use stacks::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; @@ -57,7 +58,7 @@ use stacks::chainstate::stacks::{ }; use stacks::core::mempool::MAXIMUM_MEMPOOL_TX_CHAINING; use stacks::core::{ - StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, + EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, PEER_VERSION_TESTNET, @@ -90,6 +91,7 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp use stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; +use stacks_signer::v0::SpawnedSigner; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, InitialBalance}; @@ -102,11 +104,12 @@ use crate::run_loop::boot_nakamoto; use crate::tests::neon_integrations::{ call_read_only, get_account, get_account_result, get_chain_info_opt, get_chain_info_result, get_neighbors, get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, - test_observer, wait_for_runloop, + submit_tx_fallible, test_observer, wait_for_runloop, }; +use crate::tests::signer::SignerTest; use crate::tests::{ - gen_random_port, get_chain_info, make_contract_publish, make_contract_publish_versioned, - make_stacks_transfer, to_addr, + gen_random_port, get_chain_info, make_contract_call, make_contract_publish, + make_contract_publish_versioned, make_stacks_transfer, to_addr, }; use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; @@ -200,9 +203,7 @@ impl TestSigningChannel { /// TODO: update to use signatures vec pub fn get_signature() -> Option> { let mut signer = TEST_SIGNING.lock().unwrap(); - let Some(sign_channels) = signer.as_mut() else { - return None; - }; + let sign_channels = signer.as_mut()?; let recv = sign_channels.recv.take().unwrap(); drop(signer); // drop signer so we don't hold the lock while receiving. 
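// [Editor's note] The two refactors in the hunks above recur throughout this
// patch, so here is a minimal, self-contained Rust sketch of both idioms.
// The `Payload` enum and helper names below are hypothetical stand-ins, not
// the real chainstate types.
enum Payload {
    Coinbase(u64),
    ContractCall(String),
}

// `matches!` collapses the verbose `match ... => true, _ => false` form
// into a single boolean expression.
fn is_coinbase(p: &Payload) -> bool {
    matches!(p, Payload::Coinbase(..))
}

// The `?` operator on an Option early-returns `None`, replacing
// `let Some(x) = opt else { return None; };`.
fn first_char(s: Option<&str>) -> Option<char> {
    let s = s?;
    s.chars().next()
}

fn demo() {
    assert!(is_coinbase(&Payload::Coinbase(0)));
    assert!(!is_coinbase(&Payload::ContractCall("store".into())));
    assert_eq!(first_char(Some("T3")), Some('T'));
    assert_eq!(first_char(None), None);
}
// [End note] The diff resumes below.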
let signatures = recv.recv_timeout(Duration::from_secs(30)).unwrap(); @@ -362,7 +363,7 @@ pub fn blind_signer_multinode( thread::sleep(Duration::from_secs(2)); info!("Checking for a block proposal to sign..."); last_count = cur_count; - let configs: Vec<&Config> = configs.iter().map(|x| x).collect(); + let configs: Vec<&Config> = configs.iter().collect(); match read_and_sign_block_proposal(configs.as_slice(), &signers, &signed_blocks, &sender) { Ok(signed_block) => { if signed_blocks.contains(&signed_block) { @@ -427,10 +428,12 @@ pub fn get_latest_block_proposal( .collect(); proposed_blocks.sort_by(|(block_a, _, is_latest_a), (block_b, _, is_latest_b)| { - if block_a.header.chain_length > block_b.header.chain_length { - return std::cmp::Ordering::Greater; - } else if block_a.header.chain_length < block_b.header.chain_length { - return std::cmp::Ordering::Less; + let res = block_a + .header + .chain_length + .cmp(&block_b.header.chain_length); + if res != std::cmp::Ordering::Equal { + return res; } // the heights are tied, tie break with the latest miner if *is_latest_a { @@ -439,7 +442,7 @@ pub fn get_latest_block_proposal( if *is_latest_b { return std::cmp::Ordering::Less; } - return std::cmp::Ordering::Equal; + std::cmp::Ordering::Equal }); for (b, _, is_latest) in proposed_blocks.iter() { @@ -542,7 +545,7 @@ pub fn read_and_sign_block_proposal( channel .send(proposed_block.header.signer_signature) .unwrap(); - return Ok(signer_sig_hash); + Ok(signer_sig_hash) } /// Return a working nakamoto-neon config and the miner's bitcoin address to fund @@ -552,7 +555,7 @@ pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress conf.burnchain.mode = "nakamoto-neon".into(); // tests can override this, but these tests run with epoch 2.05 by default - conf.burnchain.epochs = Some(NAKAMOTO_INTEGRATION_EPOCHS.to_vec()); + conf.burnchain.epochs = Some(EpochList::new(&*NAKAMOTO_INTEGRATION_EPOCHS)); if let Some(seed) = seed { conf.node.seed = seed.to_vec(); @@ -585,12 +588,12 @@ pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress burnchain.peer_host = Some("127.0.0.1".to_string()); } - conf.burnchain.magic_bytes = MagicBytes::from(['T' as u8, '3' as u8].as_ref()); + conf.burnchain.magic_bytes = MagicBytes::from([b'T', b'3'].as_ref()); conf.burnchain.poll_time_secs = 1; conf.node.pox_sync_sample_secs = 0; - conf.miner.first_attempt_time_ms = i64::max_value() as u64; - conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + conf.miner.first_attempt_time_ms = i64::MAX as u64; + conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; // if there's just one node, then this must be true for tests to pass conf.miner.wait_for_block_download = false; @@ -709,7 +712,7 @@ pub fn next_block_and_wait_for_commits( coord_channels: &[&Arc>], commits_submitted: &[&Arc], ) -> Result<(), String> { - let commits_submitted: Vec<_> = commits_submitted.iter().cloned().collect(); + let commits_submitted: Vec<_> = commits_submitted.to_vec(); let blocks_processed_before: Vec<_> = coord_channels .iter() .map(|x| { @@ -786,7 +789,7 @@ pub fn setup_stacker(naka_conf: &mut Config) -> Secp256k1PrivateKey { let stacker_sk = Secp256k1PrivateKey::new(); let stacker_address = tests::to_addr(&stacker_sk); naka_conf.add_initial_balance( - PrincipalData::from(stacker_address.clone()).to_string(), + PrincipalData::from(stacker_address).to_string(), POX_4_DEFAULT_STACKER_BALANCE, ); stacker_sk @@ -806,24 +809,24 @@ pub fn boot_to_epoch_3( assert_eq!(stacker_sks.len(), 
signer_sks.len()); let epochs = naka_conf.burnchain.epochs.clone().unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3 = &epochs[StacksEpochId::Epoch30]; let current_height = btc_regtest_controller.get_headers_height(); info!( "Chain bootstrapped to bitcoin block {current_height:?}, starting Epoch 2x miner"; "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), ); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - next_block_and_wait(btc_regtest_controller, &blocks_processed); - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); // first mined stacks block - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); let start_time = Instant::now(); loop { if start_time.elapsed() > Duration::from_secs(20) { panic!("Timed out waiting for the stacks height to increment") } - let stacks_height = get_chain_info(&naka_conf).stacks_tip_height; + let stacks_height = get_chain_info(naka_conf).stacks_tip_height; if stacks_height >= 1 { break; } @@ -840,13 +843,13 @@ pub fn boot_to_epoch_3( for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, naka_conf.burnchain.chain_id, @@ -860,7 +863,7 @@ pub fn boot_to_epoch_3( let signer_pk = StacksPublicKey::from_private(signer_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, naka_conf.burnchain.chain_id, @@ -900,9 +903,9 @@ pub fn boot_to_epoch_3( // Run until the prepare phase run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, reward_set_calculation, - &naka_conf, + naka_conf, ); // We need to vote on the aggregate public key if this test is self signing @@ -943,9 +946,9 @@ pub fn boot_to_epoch_3( run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, epoch_3.start_height - 1, - &naka_conf, + naka_conf, ); info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop"); @@ -968,24 +971,24 @@ pub fn boot_to_pre_epoch_3_boundary( assert_eq!(stacker_sks.len(), signer_sks.len()); let epochs = naka_conf.burnchain.epochs.clone().unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3 = &epochs[StacksEpochId::Epoch30]; let current_height = btc_regtest_controller.get_headers_height(); info!( "Chain bootstrapped to bitcoin block {current_height:?}, starting Epoch 2x miner"; "Epoch 3.0 Boundary" => (epoch_3.start_height - 1), ); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - next_block_and_wait(btc_regtest_controller, &blocks_processed); - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); // first mined stacks block - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, 
blocks_processed); let start_time = Instant::now(); loop { if start_time.elapsed() > Duration::from_secs(20) { panic!("Timed out waiting for the stacks height to increment") } - let stacks_height = get_chain_info(&naka_conf).stacks_tip_height; + let stacks_height = get_chain_info(naka_conf).stacks_tip_height; if stacks_height >= 1 { break; } @@ -1002,13 +1005,13 @@ pub fn boot_to_pre_epoch_3_boundary( for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, naka_conf.burnchain.chain_id, @@ -1022,7 +1025,7 @@ pub fn boot_to_pre_epoch_3_boundary( let signer_pk = StacksPublicKey::from_private(signer_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, naka_conf.burnchain.chain_id, @@ -1062,9 +1065,9 @@ pub fn boot_to_pre_epoch_3_boundary( // Run until the prepare phase run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, reward_set_calculation, - &naka_conf, + naka_conf, ); // We need to vote on the aggregate public key if this test is self signing @@ -1105,9 +1108,9 @@ pub fn boot_to_pre_epoch_3_boundary( run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, epoch_3.start_height - 2, - &naka_conf, + naka_conf, ); info!("Bootstrapped to one block before Epoch 3.0 boundary, Epoch 2.x miner should continue for one more block"); @@ -1191,7 +1194,7 @@ pub fn is_key_set_for_cycle( is_mainnet: bool, http_origin: &str, ) -> Result { - let key = get_key_for_cycle(reward_cycle, is_mainnet, &http_origin)?; + let key = get_key_for_cycle(reward_cycle, is_mainnet, http_origin)?; Ok(key.is_some()) } @@ -1206,7 +1209,7 @@ pub fn setup_epoch_3_reward_set( assert_eq!(stacker_sks.len(), signer_sks.len()); let epochs = naka_conf.burnchain.epochs.clone().unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3 = &epochs[StacksEpochId::Epoch30]; let reward_cycle_len = naka_conf.get_burnchain().pox_constants.reward_cycle_length as u64; let prepare_phase_len = naka_conf.get_burnchain().pox_constants.prepare_length as u64; @@ -1218,10 +1221,10 @@ pub fn setup_epoch_3_reward_set( let epoch_3_reward_cycle_boundary = epoch_3_start_height.saturating_sub(epoch_3_start_height % reward_cycle_len); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - next_block_and_wait(btc_regtest_controller, &blocks_processed); - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); // first mined stacks block - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); // stack enough to activate pox-4 let block_height = btc_regtest_controller.get_headers_height(); @@ -1241,13 +1244,13 @@ pub fn setup_epoch_3_reward_set( for (stacker_sk, signer_sk) in stacker_sks.iter().zip(signer_sks.iter()) { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let 
pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &signer_sk, + signer_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, naka_conf.burnchain.chain_id, @@ -1260,7 +1263,7 @@ pub fn setup_epoch_3_reward_set( let signer_pk = StacksPublicKey::from_private(signer_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, naka_conf.burnchain.chain_id, @@ -1305,7 +1308,7 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( ); let epochs = naka_conf.burnchain.epochs.clone().unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3 = &epochs[StacksEpochId::Epoch30]; let reward_cycle_len = naka_conf.get_burnchain().pox_constants.reward_cycle_length as u64; let prepare_phase_len = naka_conf.get_burnchain().pox_constants.prepare_length as u64; @@ -1322,9 +1325,9 @@ pub fn boot_to_epoch_3_reward_set_calculation_boundary( run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, epoch_3_reward_set_calculation_boundary, - &naka_conf, + naka_conf, ); info!("Bootstrapped to Epoch 3.0 reward set calculation boundary height: {epoch_3_reward_set_calculation_boundary}."); @@ -1340,7 +1343,7 @@ pub fn boot_to_epoch_25( btc_regtest_controller: &mut BitcoinRegtestController, ) { let epochs = naka_conf.burnchain.epochs.clone().unwrap(); - let epoch_25 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap()]; + let epoch_25 = &epochs[StacksEpochId::Epoch25]; let reward_cycle_len = naka_conf.get_burnchain().pox_constants.reward_cycle_length as u64; let prepare_phase_len = naka_conf.get_burnchain().pox_constants.prepare_length as u64; @@ -1364,9 +1367,9 @@ pub fn boot_to_epoch_25( ); run_until_burnchain_height( btc_regtest_controller, - &blocks_processed, + blocks_processed, epoch_25_start_height, - &naka_conf, + naka_conf, ); info!("Bootstrapped to Epoch 2.5: {epoch_25_start_height}."); } @@ -1391,7 +1394,7 @@ pub fn boot_to_epoch_3_reward_set( btc_regtest_controller, num_stacking_cycles, ); - next_block_and_wait(btc_regtest_controller, &blocks_processed); + next_block_and_wait(btc_regtest_controller, blocks_processed); info!( "Bootstrapped to Epoch 3.0 reward set calculation height: {}", get_chain_info(naka_conf).burn_block_height @@ -1426,7 +1429,7 @@ fn simple_neon_integration() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(5); let sender_sk = Secp256k1PrivateKey::new(); @@ -1435,16 +1438,13 @@ fn simple_neon_integration() { let send_amt = 1000; let send_fee = 100; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt * 2 + send_fee, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = 
PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -1502,7 +1502,7 @@ fn simple_neon_integration() { #[cfg(feature = "monitoring_prom")] { wait_for(10, || { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); let client = reqwest::blocking::Client::new(); let res = client .get(&prom_http_origin) @@ -1569,8 +1569,7 @@ fn simple_neon_integration() { .as_array() .unwrap() .iter() - .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) - .is_some() + .any(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) }); Ok(transfer_tx_included) }) @@ -1598,17 +1597,13 @@ fn simple_neon_integration() { ); // assert that the transfer tx was observed - let transfer_tx_included = test_observer::get_blocks() - .into_iter() - .find(|block_json| { - block_json["transactions"] - .as_array() - .unwrap() - .iter() - .find(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) - .is_some() - }) - .is_some(); + let transfer_tx_included = test_observer::get_blocks().into_iter().any(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .any(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + }); assert!( transfer_tx_included, @@ -1626,7 +1621,7 @@ fn simple_neon_integration() { #[cfg(feature = "monitoring_prom")] { wait_for(10, || { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); let client = reqwest::blocking::Client::new(); let res = client .get(&prom_http_origin) @@ -1639,10 +1634,8 @@ fn simple_neon_integration() { tip.stacks_block_height ); - let expected_result_2 = format!( - "stacks_node_stacks_tip_height {}", - tip.stacks_block_height - 1 - ); + let expected_result_2 = + format!("stacks_node_stacks_tip_height {}", tip.stacks_block_height); Ok(res.contains(&expected_result_1) && res.contains(&expected_result_2)) }) .expect("Prometheus metrics did not update"); @@ -1676,7 +1669,7 @@ fn flash_blocks_on_epoch_3() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let sender_sk = Secp256k1PrivateKey::new(); @@ -1685,16 +1678,13 @@ fn flash_blocks_on_epoch_3() { let send_amt = 1000; let send_fee = 100; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt * 2 + send_fee, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -1852,17 +1842,13 @@ fn flash_blocks_on_epoch_3() { ); // assert that the transfer tx was observed - let transfer_tx_included = test_observer::get_blocks() - .into_iter() - .find(|block_json| { - block_json["transactions"] - .as_array() - .unwrap() - .iter() - .find(|tx_json| 
tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) - .is_some() - }) - .is_some(); + let transfer_tx_included = test_observer::get_blocks().into_iter().any(|block_json| { + block_json["transactions"] + .as_array() + .unwrap() + .iter() + .any(|tx_json| tx_json["raw_tx"].as_str() == Some(&transfer_tx_hex)) + }); assert!( transfer_tx_included, @@ -1878,7 +1864,7 @@ fn flash_blocks_on_epoch_3() { // Get the Epoch 3.0 activation height (in terms of Bitcoin block height) let epochs = naka_conf.burnchain.epochs.clone().unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3 = &epochs[StacksEpochId::Epoch30]; let epoch_3_start_height = epoch_3.start_height; // Find the gap in burn blocks @@ -1895,22 +1881,16 @@ fn flash_blocks_on_epoch_3() { } } - // Verify that there's a gap of exactly 3 blocks - assert_eq!( - gap_end - gap_start + 1, - 3, - "Expected a gap of exactly 3 burn blocks due to flash blocks, found gap from {} to {}", - gap_start, - gap_end + // Verify that there's a gap of AT LEAST 3 blocks + assert!( + gap_end - gap_start + 1 >= 3, + "Expected a gap of AT LEAST 3 burn blocks due to flash blocks, found gap from {gap_start} to {gap_end}" ); // Verify that the gap includes the Epoch 3.0 activation height assert!( gap_start <= epoch_3_start_height && epoch_3_start_height <= gap_end, - "Expected the gap ({}..={}) to include the Epoch 3.0 activation height ({})", - gap_start, - gap_end, - epoch_3_start_height + "Expected the gap ({gap_start}..={gap_end}) to include the Epoch 3.0 activation height ({epoch_3_start_height})" ); // Verify blocks before and after the gap @@ -1919,7 +1899,7 @@ fn flash_blocks_on_epoch_3() { check_nakamoto_empty_block_heuristics(); info!("Verified burn block ranges, including expected gap for flash blocks"); - info!("Confirmed that the gap includes the Epoch 3.0 activation height (Bitcoin block height): {}", epoch_3_start_height); + info!("Confirmed that the gap includes the Epoch 3.0 activation height (Bitcoin block height): {epoch_3_start_height}"); coord_channel .lock() @@ -1958,13 +1938,10 @@ fn mine_multiple_per_tenure_integration() { let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -1994,7 +1971,7 @@ fn mine_multiple_per_tenure_integration() { .spawn(move || run_loop.start(None, 0)) .unwrap(); wait_for_runloop(&blocks_processed); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); boot_to_epoch_3( &naka_conf, &blocks_processed, @@ -2029,7 +2006,7 @@ fn mine_multiple_per_tenure_integration() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - debug!("Mining tenure {}", tenure_ix); + debug!("Mining tenure {tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -2146,22 +2123,19 @@ fn multiple_miners() { let send_amt = 100; let send_fee = 180; 
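// [Editor's note] Many hunks in this patch replace
// `&epochs[StacksEpoch::find_epoch_by_id(&epochs, id).unwrap()]` with
// `&epochs[id]`, which works because the new `EpochList` type indexes by
// `StacksEpochId`. A minimal sketch of that pattern follows; the stand-in
// types here are hypothetical (the real `EpochList` lives in `stacks::core`
// and may differ).
use std::ops::Index;

#[derive(Clone, Copy, PartialEq)]
enum EpochId {
    Epoch25,
    Epoch30,
}

struct Epoch {
    id: EpochId,
    start_height: u64,
}

struct EpochList(Vec<Epoch>);

impl Index<EpochId> for EpochList {
    type Output = Epoch;
    // Panics when the epoch is absent, mirroring the `.unwrap()` that the
    // updated call sites dropped.
    fn index(&self, id: EpochId) -> &Epoch {
        self.0
            .iter()
            .find(|e| e.id == id)
            .expect("epoch not in list")
    }
}

fn demo() {
    let epochs = EpochList(vec![
        Epoch { id: EpochId::Epoch25, start_height: 208 },
        Epoch { id: EpochId::Epoch30, start_height: 225 },
    ]);
    // Reads like the updated test code: `&epochs[StacksEpochId::Epoch30]`.
    assert_eq!(epochs[EpochId::Epoch30].start_height, 225);
}
// [End note] The diff resumes below.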
naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); let mut conf_node_2 = naka_conf.clone(); let localhost = "127.0.0.1"; - conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); - conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); conf_node_2.node.seed = vec![2, 2, 2, 2]; conf_node_2.burnchain.local_mining_public_key = Some( Keychain::default(conf_node_2.node.seed.clone()) @@ -2176,7 +2150,7 @@ fn multiple_miners() { let node_1_sk = Secp256k1PrivateKey::from_seed(&naka_conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), naka_conf.node.p2p_bind), @@ -2244,7 +2218,7 @@ fn multiple_miners() { .unwrap(); wait_for_runloop(&blocks_processed); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); boot_to_epoch_3( &naka_conf, &blocks_processed, @@ -2287,7 +2261,7 @@ fn multiple_miners() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - info!("Mining tenure {}", tenure_ix); + info!("Mining tenure {tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -2387,13 +2361,10 @@ fn correct_burn_outs() { { let epochs = naka_conf.burnchain.epochs.as_mut().unwrap(); - let epoch_24_ix = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch24).unwrap(); - let epoch_25_ix = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap(); - let epoch_30_ix = StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap(); - epochs[epoch_24_ix].end_height = 208; - epochs[epoch_25_ix].start_height = 208; - epochs[epoch_25_ix].end_height = 225; - epochs[epoch_30_ix].start_height = 225; + epochs[StacksEpochId::Epoch24].end_height = 208; + epochs[StacksEpochId::Epoch25].start_height = 208; + epochs[StacksEpochId::Epoch25].end_height = 225; + epochs[StacksEpochId::Epoch30].start_height = 225; } naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); @@ -2412,10 +2383,7 @@ fn correct_burn_outs() { let stacker_accounts = accounts[0..3].to_vec(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + 
naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let signers = TestSigners::new(vec![sender_signer_sk]); @@ -2447,8 +2415,8 @@ fn correct_burn_outs() { wait_for_runloop(&blocks_processed); let epochs = naka_conf.burnchain.epochs.clone().unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; - let epoch_25 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap()]; + let epoch_3 = &epochs[StacksEpochId::Epoch30]; + let epoch_25 = &epochs[StacksEpochId::Epoch25]; let current_height = btc_regtest_controller.get_headers_height(); info!( "Chain bootstrapped to bitcoin block {current_height:?}, starting Epoch 2x miner"; @@ -2504,7 +2472,7 @@ fn correct_burn_outs() { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&account.0).bytes, + tests::to_addr(account.0).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); @@ -2525,7 +2493,7 @@ fn correct_burn_outs() { .to_rsv(); let stacking_tx = tests::make_contract_call( - &account.0, + account.0, account.2.nonce, 1000, naka_conf.burnchain.chain_id, @@ -2587,7 +2555,7 @@ fn correct_burn_outs() { .block_height_to_reward_cycle(epoch_3.start_height) .unwrap(); - info!("first_epoch_3_cycle: {:?}", first_epoch_3_cycle); + info!("first_epoch_3_cycle: {first_epoch_3_cycle:?}"); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); let stacker_response = get_stacker_set(&http_origin, first_epoch_3_cycle).unwrap(); @@ -2733,10 +2701,7 @@ fn block_proposal_api_endpoint() { let stacker_sk = setup_stacker(&mut conf); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); // only subscribe to the block proposal events test_observer::spawn(); @@ -2761,7 +2726,7 @@ fn block_proposal_api_endpoint() { let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); wait_for_runloop(&blocks_processed); boot_to_epoch_3( &conf, @@ -2815,7 +2780,7 @@ fn block_proposal_api_endpoint() { .unwrap() .unwrap(); - let privk = conf.miner.mining_key.unwrap().clone(); + let privk = conf.miner.mining_key.unwrap(); let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()) .expect("Failed to get sortition tip"); let db_handle = sortdb.index_handle(&sort_tip); @@ -2855,6 +2820,7 @@ fn block_proposal_api_endpoint() { tenure_change, coinbase, 1, + None, ) .expect("Failed to build Nakamoto block"); @@ -2911,41 +2877,41 @@ fn block_proposal_api_endpoint() { ("Must wait", sign(&proposal), HTTP_TOO_MANY, None), ( "Non-canonical or absent tenure", - (|| { + { let mut sp = sign(&proposal); sp.block.header.consensus_hash.0[3] ^= 0x07; sp - })(), + }, HTTP_ACCEPTED, Some(Err(ValidateRejectCode::NonCanonicalTenure)), ), ( "Corrupted (bit flipped after signing)", - (|| { + { let mut sp = sign(&proposal); sp.block.header.timestamp ^= 0x07; sp - })(), + }, HTTP_ACCEPTED, Some(Err(ValidateRejectCode::ChainstateError)), ), ( "Invalid `chain_id`", - (|| { + { let mut p = proposal.clone(); p.chain_id ^= 0xFFFFFFFF; sign(&p) - })(), + }, 
HTTP_ACCEPTED, Some(Err(ValidateRejectCode::InvalidBlock)), ), ( "Invalid `miner_signature`", - (|| { + { let mut sp = sign(&proposal); sp.block.header.miner_signature.0[1] ^= 0x80; sp - })(), + }, HTTP_ACCEPTED, Some(Err(ValidateRejectCode::ChainstateError)), ), @@ -3043,10 +3009,7 @@ fn block_proposal_api_endpoint() { .iter() .zip(proposal_responses.iter()) { - info!( - "Received response {:?}, expecting {:?}", - &response, &expected_response - ); + info!("Received response {response:?}, expecting {expected_response:?}"); match expected_response { Ok(_) => { assert!(matches!(response, BlockValidateResponse::Ok(_))); @@ -3094,19 +3057,16 @@ fn miner_writes_proposed_block_to_stackerdb() { let send_amt = 1000; let send_fee = 100; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt + send_fee, ); let stacker_sk = setup_stacker(&mut naka_conf); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); test_observer::spawn(); test_observer::register( @@ -3217,9 +3177,9 @@ fn vote_for_aggregate_key_burn_op() { let signer_sk = Secp256k1PrivateKey::new(); let signer_addr = tests::to_addr(&signer_sk); - let mut signers = TestSigners::new(vec![signer_sk.clone()]); + let mut signers = TestSigners::new(vec![signer_sk]); - naka_conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000); + naka_conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); @@ -3278,7 +3238,7 @@ fn vote_for_aggregate_key_burn_op() { let mut miner_signer = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); info!("Submitting pre-stx op"); let pre_stx_op = PreStxOp { - output: signer_addr.clone(), + output: signer_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -3344,13 +3304,13 @@ fn vote_for_aggregate_key_burn_op() { let stacker_pk = StacksPublicKey::from_private(&stacker_sk); let signer_key: StacksPublicKeyBuffer = stacker_pk.to_bytes_compressed().as_slice().into(); - let aggregate_key = signer_key.clone(); + let aggregate_key = signer_key; let vote_for_aggregate_key_op = BlockstackOperationType::VoteForAggregateKey(VoteForAggregateKeyOp { signer_key, signer_index, - sender: signer_addr.clone(), + sender: signer_addr, round: 0, reward_cycle, aggregate_key, @@ -3361,7 +3321,7 @@ fn vote_for_aggregate_key_burn_op() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut signer_burnop_signer = BurnchainOpSigner::new(signer_sk.clone(), false); + let mut signer_burnop_signer = BurnchainOpSigner::new(signer_sk, false); assert!( btc_regtest_controller .submit_operation( @@ -3394,10 +3354,10 @@ fn vote_for_aggregate_key_burn_op() { for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { - info!("Found a burn op: {:?}", tx); + info!("Found a burn op: {tx:?}"); let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); if !burnchain_op.contains_key("vote_for_aggregate_key") { - warn!("Got unexpected burnchain op: {:?}", 
burnchain_op); + warn!("Got unexpected burnchain op: {burnchain_op:?}"); panic!("unexpected btc transaction type"); } let vote_obj = burnchain_op.get("vote_for_aggregate_key").unwrap(); @@ -3447,7 +3407,7 @@ fn follower_bootup() { let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 5; let inter_blocks_per_tenure = 9; // setup sender + recipient for some test stx transfers let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -3575,7 +3532,7 @@ fn follower_bootup() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - debug!("follower_bootup: Miner runs tenure {}", tenure_ix); + debug!("follower_bootup: Miner runs tenure {tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -3583,10 +3540,7 @@ let mut last_tip = BlockHeaderHash([0x00; 32]); let mut last_nonce = None; - debug!( - "follower_bootup: Miner mines interum blocks for tenure {}", - tenure_ix - ); + debug!("follower_bootup: Miner mines interim blocks for tenure {tenure_ix}"); // mine the interim blocks for _ in 0..inter_blocks_per_tenure { @@ -3622,8 +3576,8 @@ let tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap(); - debug!("follower_bootup: Miner account: {:?}", &account); - debug!("follower_bootup: Miner sent {}: {:?}", &tx.txid(), &tx); + debug!("follower_bootup: Miner account: {account:?}"); + debug!("follower_bootup: Miner sent {}: {tx:?}", &tx.txid()); let now = get_epoch_time_secs(); while get_epoch_time_secs() < now + 10 { @@ -3768,13 +3722,13 @@ fn follower_bootup_across_multiple_cycles() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - naka_conf.node.pox_sync_sample_secs = 30; + naka_conf.node.pox_sync_sample_secs = 180; naka_conf.burnchain.max_rbf = 10_000_000; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 5; let inter_blocks_per_tenure = 9; // setup sender + recipient for some test stx transfers let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( -
PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); @@ -3857,8 +3808,13 @@ .reward_cycle_length * 2 { + let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); + wait_for(20, || { + Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .unwrap(); } info!("Nakamoto miner has advanced two reward cycles"); @@ -3973,7 +3929,7 @@ fn follower_bootup_custom_chain_id() { let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 5; let inter_blocks_per_tenure = 9; // setup sender + recipient for some test stx transfers let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -4101,7 +4054,7 @@ fn follower_bootup_custom_chain_id() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - debug!("follower_bootup: Miner runs tenure {}", tenure_ix); + debug!("follower_bootup: Miner runs tenure {tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -4109,10 +4062,7 @@ let mut last_tip = BlockHeaderHash([0x00; 32]); let mut last_nonce = None; - debug!( - "follower_bootup: Miner mines interum blocks for tenure {}", - tenure_ix - ); + debug!("follower_bootup: Miner mines interim blocks for tenure {tenure_ix}"); // mine the interim blocks for _ in 0..inter_blocks_per_tenure { @@ -4148,8 +4098,8 @@ let tx = StacksTransaction::consensus_deserialize(&mut &transfer_tx[..]).unwrap(); - debug!("follower_bootup: Miner account: {:?}", &account); - debug!("follower_bootup: Miner sent {}: {:?}", &tx.txid(), &tx); + debug!("follower_bootup: Miner account: {account:?}"); + debug!("follower_bootup: Miner sent {}: {tx:?}", &tx.txid()); let now = get_epoch_time_secs(); while get_epoch_time_secs() < now + 10 { @@ -4327,23 +4277,14 @@ fn burn_ops_integration_test() { let sender_addr = tests::to_addr(&sender_sk); let mut sender_nonce = 0; - let mut signers = TestSigners::new(vec![signer_sk_1.clone()]); + let mut signers = TestSigners::new(vec![signer_sk_1]); let stacker_sk = setup_stacker(&mut naka_conf); // Add the initial balances to the other accounts - naka_conf.add_initial_balance( - PrincipalData::from(stacker_addr_1.clone()).to_string(), - 1000000, - ); - naka_conf.add_initial_balance( -
PrincipalData::from(stacker_addr_2.clone()).to_string(), - 1000000, - ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), - 100_000_000, - ); + naka_conf.add_initial_balance(PrincipalData::from(stacker_addr_1).to_string(), 1000000); + naka_conf.add_initial_balance(PrincipalData::from(stacker_addr_2).to_string(), 1000000); + naka_conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 100_000_000); test_observer::spawn(); test_observer::register_any(&mut naka_conf); @@ -4396,7 +4337,7 @@ fn burn_ops_integration_test() { info!("Submitting first pre-stx op"); let pre_stx_op = PreStxOp { - output: signer_addr_1.clone(), + output: signer_addr_1, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -4427,7 +4368,7 @@ fn burn_ops_integration_test() { let mut miner_signer_2 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); info!("Submitting second pre-stx op"); let pre_stx_op_2 = PreStxOp { - output: signer_addr_2.clone(), + output: signer_addr_2, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -4449,7 +4390,7 @@ fn burn_ops_integration_test() { let mut miner_signer_3 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); info!("Submitting third pre-stx op"); let pre_stx_op_3 = PreStxOp { - output: stacker_addr_1.clone(), + output: stacker_addr_1, txid: Txid([0u8; 32]), vtxindex: 0, block_height: 0, @@ -4470,7 +4411,7 @@ fn burn_ops_integration_test() { info!("Submitting fourth pre-stx op"); let mut miner_signer_4 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); let pre_stx_op_4 = PreStxOp { - output: stacker_addr_2.clone(), + output: stacker_addr_2, txid: Txid([0u8; 32]), vtxindex: 0, block_height: 0, @@ -4567,10 +4508,10 @@ fn burn_ops_integration_test() { "reward_cycle" => reward_cycle, ); - let mut signer_burnop_signer_1 = BurnchainOpSigner::new(signer_sk_1.clone(), false); - let mut signer_burnop_signer_2 = BurnchainOpSigner::new(signer_sk_2.clone(), false); - let mut stacker_burnop_signer_1 = BurnchainOpSigner::new(stacker_sk_1.clone(), false); - let mut stacker_burnop_signer_2 = BurnchainOpSigner::new(stacker_sk_2.clone(), false); + let mut signer_burnop_signer_1 = BurnchainOpSigner::new(signer_sk_1, false); + let mut signer_burnop_signer_2 = BurnchainOpSigner::new(signer_sk_2, false); + let mut stacker_burnop_signer_1 = BurnchainOpSigner::new(stacker_sk_1, false); + let mut stacker_burnop_signer_2 = BurnchainOpSigner::new(stacker_sk_2, false); info!( "Before stack-stx op, signer 1 total: {}", @@ -4604,8 +4545,8 @@ fn burn_ops_integration_test() { info!("Submitting transfer STX op"); let transfer_stx_op = TransferStxOp { - sender: stacker_addr_1.clone(), - recipient: stacker_addr_2.clone(), + sender: stacker_addr_1, + recipient: stacker_addr_2, transfered_ustx: 10000, memo: vec![], txid: Txid([0u8; 32]), @@ -4627,8 +4568,8 @@ fn burn_ops_integration_test() { info!("Submitting delegate STX op"); let del_stx_op = DelegateStxOp { - sender: stacker_addr_2.clone(), - delegate_to: stacker_addr_1.clone(), + sender: stacker_addr_2, + delegate_to: stacker_addr_1, reward_addr: None, delegated_ustx: 100_000, // to be filled in @@ -4655,7 +4596,7 @@ fn burn_ops_integration_test() { let min_stx = pox_info.next_cycle.min_threshold_ustx; let stack_stx_op_with_some_signer_key = StackStxOp { - sender: signer_addr_1.clone(), + sender: signer_addr_1, reward_addr: pox_addr, stacked_ustx: min_stx.into(), num_cycles: lock_period, @@ -4682,7 +4623,7 @@ fn burn_ops_integration_test() { 
); let stack_stx_op_with_no_signer_key = StackStxOp { - sender: signer_addr_2.clone(), + sender: signer_addr_2, reward_addr: PoxAddress::Standard(signer_addr_2, None), stacked_ustx: 100000, num_cycles: 6, @@ -4767,7 +4708,7 @@ fn burn_ops_integration_test() { for tx in transactions.iter().rev() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { - info!("Found a burn op: {:?}", tx); + info!("Found a burn op: {tx:?}"); assert!(block_has_tenure_change, "Block should have a tenure change"); let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); if burnchain_op.contains_key("transfer_stx") { @@ -4785,15 +4726,14 @@ fn burn_ops_integration_test() { assert_eq!(recipient, stacker_addr_2.to_string()); assert_eq!(transfered_ustx, 10000); info!( - "Transfer STX op: sender: {}, recipient: {}, transfered_ustx: {}", - sender, recipient, transfered_ustx + "Transfer STX op: sender: {sender}, recipient: {recipient}, transfered_ustx: {transfered_ustx}" ); assert!(!transfer_stx_found, "Transfer STX op should be unique"); transfer_stx_found = true; continue; } if burnchain_op.contains_key("delegate_stx") { - info!("Got delegate STX op: {:?}", burnchain_op); + info!("Got delegate STX op: {burnchain_op:?}"); let delegate_stx_obj = burnchain_op.get("delegate_stx").unwrap(); let sender_obj = delegate_stx_obj.get("sender").unwrap(); let sender = sender_obj.get("address").unwrap().as_str().unwrap(); @@ -4812,7 +4752,7 @@ fn burn_ops_integration_test() { continue; } if !burnchain_op.contains_key("stack_stx") { - warn!("Got unexpected burnchain op: {:?}", burnchain_op); + warn!("Got unexpected burnchain op: {burnchain_op:?}"); panic!("unexpected btc transaction type"); } let stack_stx_obj = burnchain_op.get("stack_stx").unwrap(); @@ -4883,7 +4823,7 @@ fn burn_ops_integration_test() { for ancestor_bhh in ancestor_burnchain_header_hashes.iter().rev() { let stacking_ops = SortitionDB::get_stack_stx_ops(sortdb_conn, ancestor_bhh).unwrap(); for stacking_op in stacking_ops.into_iter() { - debug!("Stacking op queried from sortdb: {:?}", stacking_op); + debug!("Stacking op queried from sortdb: {stacking_op:?}"); match stacking_op.signer_key { Some(_) => found_some = true, None => found_none = true, @@ -4932,23 +4872,21 @@ fn forked_tenure_is_ignored() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(10); + naka_conf.miner.block_commit_delay = Duration::from_secs(0); let sender_sk = Secp256k1PrivateKey::new(); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt + send_fee, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let stacker_sk = setup_stacker(&mut naka_conf); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); @@ -5052,7 +4990,7 @@ fn forked_tenure_is_ignored() { // Unpause the 
broadcast of Tenure B's block, do not submit commits, and do not allow blocks to // be processed - test_skip_commit_op.0.lock().unwrap().replace(true); + test_skip_commit_op.set(true); TEST_BROADCAST_STALL.lock().unwrap().replace(false); // Wait for a stacks block to be broadcasted. @@ -5076,7 +5014,7 @@ fn forked_tenure_is_ignored() { .nakamoto_blocks_db() .get_nakamoto_tenure_start_blocks(&tip_sn.consensus_hash) .unwrap() - .get(0) + .first() .cloned() .unwrap(); @@ -5105,7 +5043,7 @@ fn forked_tenure_is_ignored() { .expect("Mutex poisoned") .get_stacks_blocks_processed(); next_block_and(&mut btc_regtest_controller, 60, || { - test_skip_commit_op.0.lock().unwrap().replace(false); + test_skip_commit_op.set(false); TEST_BLOCK_ANNOUNCE_STALL.lock().unwrap().replace(false); let commits_count = commits_submitted.load(Ordering::SeqCst); let blocks_count = mined_blocks.load(Ordering::SeqCst); @@ -5305,13 +5243,11 @@ fn check_block_heights() { let send_fee = 180; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); + naka_conf.miner.tenure_cost_limit_per_block_percentage = None; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -5395,12 +5331,12 @@ fn check_block_heights() { vec![], ); let preheights = heights0_value.expect_tuple().unwrap(); - info!("Heights from pre-epoch 3.0: {}", preheights); + info!("Heights from pre-epoch 3.0: {preheights}"); wait_for_first_naka_block_commit(60, &commits_submitted); let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); // With the first Nakamoto block, the chain tip and the number of tenures // must be the same (before Nakamoto every block counts as a tenure) @@ -5418,7 +5354,7 @@ fn check_block_heights() { vec![], ); let heights0 = heights0_value.expect_tuple().unwrap(); - info!("Heights from epoch 3.0 start: {}", heights0); + info!("Heights from epoch 3.0 start: {heights0}"); assert_eq!( heights0.get("burn-block-height"), preheights.get("burn-block-height"), @@ -5467,7 +5403,7 @@ fn check_block_heights() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - info!("Mining tenure {}", tenure_ix); + info!("Mining tenure {tenure_ix}"); let commits_before = commits_submitted.load(Ordering::SeqCst); next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) .unwrap(); @@ -5489,7 +5425,7 @@ fn check_block_heights() { vec![], ); let heights1 = heights1_value.expect_tuple().unwrap(); - info!("Heights from Clarity 1: {}", heights1); + info!("Heights from Clarity 1: {heights1}"); let heights3_value = call_read_only( &naka_conf, @@ -5499,7 +5435,7 @@ fn check_block_heights() { vec![], ); let heights3 = heights3_value.expect_tuple().unwrap(); - info!("Heights from Clarity 3: {}", heights3); + info!("Heights from Clarity 3: {heights3}"); let bbh1 = heights1 .get("burn-block-height") @@ -5599,7 +5535,7 @@ fn check_block_heights() { vec![], ); let heights1 = heights1_value.expect_tuple().unwrap(); - info!("Heights from Clarity 1: {}", heights1); + info!("Heights from Clarity 1: 
{heights1}"); let heights3_value = call_read_only( &naka_conf, @@ -5609,7 +5545,7 @@ fn check_block_heights() { vec![], ); let heights3 = heights3_value.expect_tuple().unwrap(); - info!("Heights from Clarity 3: {}", heights3); + info!("Heights from Clarity 3: {heights3}"); let bbh1 = heights1 .get("burn-block-height") @@ -5724,17 +5660,11 @@ fn nakamoto_attempt_time() { let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), - 1_000_000_000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), 1_000_000_000); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100_000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100_000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); @@ -5850,7 +5780,7 @@ fn nakamoto_attempt_time() { // mine the interim blocks for tenure_count in 0..inter_blocks_per_tenure { - debug!("nakamoto_attempt_time: begin tenure {}", tenure_count); + debug!("nakamoto_attempt_time: begin tenure {tenure_count}"); let blocks_processed_before = coord_channel .lock() @@ -5988,8 +5918,7 @@ fn nakamoto_attempt_time() { break 'submit_txs; } info!( - "nakamoto_times_ms: on account {}; sent {} txs so far (out of {})", - acct_idx, tx_count, tx_limit + "nakamoto_times_ms: on account {acct_idx}; sent {tx_count} txs so far (out of {tx_limit})" ); } acct_idx += 1; @@ -6057,13 +5986,11 @@ fn clarity_burn_state() { let tx_fee = 1000; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), deploy_fee + tx_fee * tenure_count + tx_fee * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); + naka_conf.miner.tenure_cost_limit_per_block_percentage = None; let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); @@ -6140,7 +6067,7 @@ fn clarity_burn_state() { // Mine `tenure_count` nakamoto tenures for tenure_ix in 0..tenure_count { - info!("Mining tenure {}", tenure_ix); + info!("Mining tenure {tenure_ix}"); // Don't submit this tx on the first iteration, because the contract is not published yet. 
if tenure_ix > 0 { @@ -6201,7 +6128,7 @@ fn clarity_burn_state() { let info = get_chain_info(&naka_conf); burn_block_height = info.burn_block_height as u128; - info!("Expecting burn block height to be {}", burn_block_height); + info!("Expecting burn block height to be {burn_block_height}"); // Assert that the contract call was successful test_observer::get_mined_nakamoto_blocks() @@ -6216,11 +6143,11 @@ fn clarity_burn_state() { return; } - info!("Contract call result: {}", result); + info!("Contract call result: {result}"); result.clone().expect_result_ok().expect("Ok result"); } _ => { - info!("Unsuccessful event: {:?}", event); + info!("Unsuccessful event: {event:?}"); panic!("Expected a successful transaction"); } }); @@ -6242,7 +6169,7 @@ fn clarity_burn_state() { "foo", vec![&expected_height], ); - info!("Read-only result: {:?}", result); + info!("Read-only result: {result:?}"); result.expect_result_ok().expect("Read-only call failed"); // Submit a tx to trigger the next block @@ -6278,11 +6205,11 @@ fn clarity_burn_state() { .iter() .for_each(|event| match event { TransactionEvent::Success(TransactionSuccessEvent { result, .. }) => { - info!("Contract call result: {}", result); + info!("Contract call result: {result}"); result.clone().expect_result_ok().expect("Ok result"); } _ => { - info!("Unsuccessful event: {:?}", event); + info!("Unsuccessful event: {event:?}"); panic!("Expected a successful transaction"); } }); @@ -6308,6 +6235,7 @@ fn clarity_burn_state() { #[test] #[ignore] +#[allow(clippy::drop_non_drop)] fn signer_chainstate() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; @@ -6315,7 +6243,7 @@ fn signer_chainstate() { let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); @@ -6325,15 +6253,12 @@ fn signer_chainstate() { let send_amt = 1000; let send_fee = 200; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * 20, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -6389,7 +6314,7 @@ fn signer_chainstate() { .unwrap() .unwrap() .stacks_block_height; - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); wait_for(10, || { let client = reqwest::blocking::Client::new(); let res = client @@ -6451,6 +6376,7 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); @@ -6589,6 +6515,7 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: 
Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }; let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() @@ -6623,10 +6550,10 @@ fn signer_chainstate() { valid: Some(true), signed_over: true, proposed_time: get_epoch_time_secs(), - signed_self: None, - signed_group: None, + signed_self: Some(get_epoch_time_secs()), + signed_group: Some(get_epoch_time_secs()), ext: ExtraBlockInfo::None, - state: BlockState::Unprocessed, + state: BlockState::GloballyAccepted, }) .unwrap(); @@ -6640,13 +6567,13 @@ fn signer_chainstate() { // Case: the block doesn't confirm the prior blocks that have been signed. let last_tenure = &last_tenures_proposals.as_ref().unwrap().1.clone(); let last_tenure_header = &last_tenure.header; - let miner_sk = naka_conf.miner.mining_key.clone().unwrap(); + let miner_sk = naka_conf.miner.mining_key.unwrap(); let miner_pk = StacksPublicKey::from_private(&miner_sk); let mut sibling_block_header = NakamotoBlockHeader { version: 1, chain_length: last_tenure_header.chain_length, burn_spent: last_tenure_header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), + consensus_hash: last_tenure_header.consensus_hash, parent_block_id: last_tenure_header.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), @@ -6666,6 +6593,7 @@ fn signer_chainstate() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }; let mut sortitions_view = SortitionsView::fetch_view(proposal_conf, &signer_client).unwrap(); let burn_block_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) @@ -6694,8 +6622,8 @@ fn signer_chainstate() { version: 1, chain_length: last_tenure_header.chain_length, burn_spent: last_tenure_header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), - parent_block_id: last_tenure_header.parent_block_id.clone(), + consensus_hash: last_tenure_header.consensus_hash, + parent_block_id: last_tenure_header.parent_block_id, tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), timestamp: last_tenure_header.timestamp + 1, @@ -6752,7 +6680,7 @@ fn signer_chainstate() { version: 1, chain_length: reorg_to_block.header.chain_length + 1, burn_spent: reorg_to_block.header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), + consensus_hash: last_tenure_header.consensus_hash, parent_block_id: reorg_to_block.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), @@ -6783,9 +6711,9 @@ fn signer_chainstate() { post_condition_mode: TransactionPostConditionMode::Allow, post_conditions: vec![], payload: TransactionPayload::TenureChange(TenureChangePayload { - tenure_consensus_hash: sibling_block_header.consensus_hash.clone(), - prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash.clone(), - burn_view_consensus_hash: sibling_block_header.consensus_hash.clone(), + tenure_consensus_hash: sibling_block_header.consensus_hash, + prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash, + burn_view_consensus_hash: sibling_block_header.consensus_hash, previous_tenure_end: reorg_to_block.block_id(), previous_tenure_blocks: 1, cause: stacks::chainstate::stacks::TenureChangeCause::BlockFound, @@ -6813,12 +6741,12 @@ 
fn signer_chainstate() { // Case: the block contains a tenure change, but the parent tenure is a reorg let reorg_to_block = first_tenure_blocks.as_ref().unwrap().last().unwrap(); // make the sortition_view *think* that our block commit pointed at this old tenure - sortitions_view.cur_sortition.parent_tenure_id = reorg_to_block.header.consensus_hash.clone(); + sortitions_view.cur_sortition.parent_tenure_id = reorg_to_block.header.consensus_hash; let mut sibling_block_header = NakamotoBlockHeader { version: 1, chain_length: reorg_to_block.header.chain_length + 1, burn_spent: reorg_to_block.header.burn_spent, - consensus_hash: last_tenure_header.consensus_hash.clone(), + consensus_hash: last_tenure_header.consensus_hash, parent_block_id: reorg_to_block.block_id(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[0]), state_index_root: TrieHash([0; 32]), @@ -6849,9 +6777,9 @@ fn signer_chainstate() { post_condition_mode: TransactionPostConditionMode::Allow, post_conditions: vec![], payload: TransactionPayload::TenureChange(TenureChangePayload { - tenure_consensus_hash: sibling_block_header.consensus_hash.clone(), - prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash.clone(), - burn_view_consensus_hash: sibling_block_header.consensus_hash.clone(), + tenure_consensus_hash: sibling_block_header.consensus_hash, + prev_tenure_consensus_hash: reorg_to_block.header.consensus_hash, + burn_view_consensus_hash: sibling_block_header.consensus_hash, previous_tenure_end: reorg_to_block.block_id(), previous_tenure_blocks: 1, cause: stacks::chainstate::stacks::TenureChangeCause::BlockFound, @@ -6890,7 +6818,7 @@ fn signer_chainstate() { // every step of the return should be linked to the parent let mut prior: Option<&TenureForkingInfo> = None; for step in fork_info.iter().rev() { - if let Some(ref prior) = prior { + if let Some(prior) = prior { assert_eq!(prior.sortition_id, step.parent_sortition_id); } prior = Some(step); @@ -6929,7 +6857,7 @@ fn continue_tenure_extend() { let mut signers = TestSigners::default(); let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); let http_origin = naka_conf.node.data_url.clone(); @@ -6939,15 +6867,12 @@ fn continue_tenure_extend() { let send_amt = 1000; let send_fee = 200; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * 20, ); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); let mut transfer_nonce = 0; @@ -7006,7 +6931,7 @@ fn continue_tenure_extend() { // query for prometheus metrics #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); wait_for(10, || { let client = reqwest::blocking::Client::new(); let res = client @@ -7055,7 +6980,7 @@ fn continue_tenure_extend() { .get_stacks_blocks_processed(); info!("Pausing commit ops to 
trigger a tenure extend."); - test_skip_commit_op.0.lock().unwrap().replace(true); + test_skip_commit_op.set(true); next_block_and(&mut btc_regtest_controller, 60, || Ok(true)).unwrap(); @@ -7154,7 +7079,7 @@ fn continue_tenure_extend() { } info!("Resuming commit ops to mine regular tenures."); - test_skip_commit_op.0.lock().unwrap().replace(false); + test_skip_commit_op.set(false); // Mine 15 more regular nakamoto tenures for _i in 0..15 { @@ -7195,7 +7120,7 @@ fn continue_tenure_extend() { let mut has_extend = false; for tx in block["transactions"].as_array().unwrap() { let raw_tx = tx["raw_tx"].as_str().unwrap(); - if raw_tx == &transfer_tx_hex { + if raw_tx == transfer_tx_hex { transfer_tx_included = true; continue; } @@ -7204,8 +7129,9 @@ fn continue_tenure_extend() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match &parsed.payload { - TransactionPayload::TenureChange(payload) => match payload.cause { + + if let TransactionPayload::TenureChange(payload) = &parsed.payload { + match payload.cause { TenureChangeCause::Extended => { has_extend = true; tenure_extends.push(parsed); @@ -7216,9 +7142,8 @@ fn continue_tenure_extend() { } tenure_block_founds.push(parsed); } - }, - _ => {} - }; + }; + } } last_block_had_extend = has_extend; } @@ -7243,7 +7168,7 @@ fn continue_tenure_extend() { // make sure prometheus returns an updated height #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); wait_for(10, || { let client = reqwest::blocking::Client::new(); let res = client @@ -7281,8 +7206,8 @@ fn get_block_times( info!("Getting block times at block {block_height}, tenure {tenure_height}..."); let time0_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract0_name, "get-time", vec![&clarity::vm::Value::UInt(tenure_height)], @@ -7295,8 +7220,8 @@ fn get_block_times( .unwrap(); let time_now0_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract0_name, "get-last-time", vec![], @@ -7309,8 +7234,8 @@ fn get_block_times( .unwrap(); let time1_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract1_name, "get-time", vec![&clarity::vm::Value::UInt(tenure_height)], @@ -7323,8 +7248,8 @@ fn get_block_times( .unwrap(); let time1_now_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract1_name, "get-last-time", vec![], @@ -7337,8 +7262,8 @@ fn get_block_times( .unwrap(); let time3_tenure_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract3_name, "get-tenure-time", vec![&clarity::vm::Value::UInt(block_height)], @@ -7351,8 +7276,8 @@ fn get_block_times( .unwrap(); let time3_block_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract3_name, "get-block-time", vec![&clarity::vm::Value::UInt(block_height)], @@ -7365,8 +7290,8 @@ fn get_block_times( .unwrap(); let time3_now_tenure_value = call_read_only( - &naka_conf, - &sender_addr, + naka_conf, + sender_addr, contract3_name, "get-last-tenure-time", vec![], @@ -7433,13 +7358,10 @@ fn check_block_times() { let send_fee = 180; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * 12, ); - 
naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -7504,7 +7426,7 @@ fn check_block_times() { blind_signer(&naka_conf, &signers, proposals_submitted); let epochs = naka_conf.burnchain.epochs.clone().unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3 = &epochs[StacksEpochId::Epoch30]; let epoch_3_start = epoch_3.start_height; let mut last_stacks_block_height = 0; let mut last_tenure_height = 0; @@ -7529,7 +7451,7 @@ fn check_block_times() { .unwrap() .expect_u128() .unwrap(); - info!("Time from pre-epoch 3.0: {}", time0); + info!("Time from pre-epoch 3.0: {time0}"); // This version uses the Clarity 1 / 2 function let contract1_name = "test-contract-1"; @@ -7823,6 +7745,7 @@ fn check_block_info() { naka_conf.burnchain.chain_id = CHAIN_ID_TESTNET + 1; let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.miner.tenure_cost_limit_per_block_percentage = None; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); @@ -7834,13 +7757,10 @@ fn check_block_info() { let send_amt = 100; let send_fee = 180; let deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * 2, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); let contract3_name = "test-contract-3"; @@ -7972,7 +7892,7 @@ fn check_block_info() { blind_signer(&naka_conf, &signers, proposals_submitted); let c0_block_ht_1_pre_3 = get_block_info(contract0_name, 1); - info!("Info from pre-epoch 3.0: {:?}", c0_block_ht_1_pre_3); + info!("Info from pre-epoch 3.0: {c0_block_ht_1_pre_3:?}"); wait_for_first_naka_block_commit(60, &commits_submitted); @@ -8040,7 +7960,7 @@ fn check_block_info() { // one in the tenure) let info = get_chain_info(&naka_conf); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let last_stacks_block_height = info.stacks_tip_height as u128; let last_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); let last_tenure_height: u128 = @@ -8063,7 +7983,7 @@ fn check_block_info() { .unwrap(); let info = get_chain_info(&naka_conf); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let cur_stacks_block_height = info.stacks_tip_height as u128; let cur_stacks_tip = StacksBlockId::new(&info.stacks_tip_consensus_hash, &info.stacks_tip); let cur_tenure_height: u128 = @@ -8382,7 +8302,7 @@ fn check_block_info() { run_loop_thread.join().unwrap(); } -fn get_expected_reward_for_height(blocks: &Vec<serde_json::Value>, block_height: u128) -> u128 { // Find the target block let target_block = blocks .iter() @@ -8469,13 +8389,10 @@ fn check_block_info_rewards() { let send_amt = 100; let send_fee = 180; let
deploy_fee = 3000; naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), 3 * deploy_fee + (send_amt + send_fee) * 2, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -8561,7 +8478,7 @@ fn check_block_info_rewards() { blind_signer(&naka_conf, &signers, proposals_submitted); let tuple0 = get_block_info(contract0_name, 1); - info!("Info from pre-epoch 3.0: {:?}", tuple0); + info!("Info from pre-epoch 3.0: {tuple0:?}"); wait_for_first_naka_block_commit(60, &commits_submitted); @@ -8673,7 +8590,7 @@ fn check_block_info_rewards() { } let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let (chainstate, _) = StacksChainState::open( naka_conf.is_mainnet(), naka_conf.burnchain.chain_id, @@ -8706,7 +8623,7 @@ fn check_block_info_rewards() { } let info = get_chain_info_result(&naka_conf).unwrap(); - info!("Chain info: {:?}", info); + info!("Chain info: {info:?}"); let last_stacks_block_height = info.stacks_tip_height as u128; let blocks = test_observer::get_blocks(); @@ -8795,10 +8712,11 @@ fn mock_mining() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); naka_conf.node.pox_sync_sample_secs = 30; + naka_conf.miner.tenure_cost_limit_per_block_percentage = None; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); + let mut signers = TestSigners::new(vec![sender_signer_sk]); let tenure_count = 3; let inter_blocks_per_tenure = 3; // setup sender + recipient for some test stx transfers @@ -8820,13 +8738,10 @@ fn mock_mining() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -9010,18 +8925,22 @@ fn mock_mining() { Ok(follower_naka_mined_blocks.load(Ordering::SeqCst) > follower_naka_mined_blocks_before) }) - .expect(&format!( - "Timed out waiting for mock miner block {}", - follower_naka_mined_blocks_before + 1 - )); + .unwrap_or_else(|_| { + panic!( + "Timed out waiting for mock miner block {}", + follower_naka_mined_blocks_before + 1 + ) + }); wait_for(20, || { Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) }) - .expect(&format!( - "Timed out waiting for mock miner block {}", - follower_naka_mined_blocks_before + 1 - )); + .unwrap_or_else(|_| { + panic!( + "Timed out waiting for mock miner block {}", + follower_naka_mined_blocks_before + 1 + ) + }); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ 
-9047,9 +8966,7 @@ fn mock_mining() { let blocks_mock_mined = mock_mining_blocks_end - mock_mining_blocks_start; assert!( blocks_mock_mined >= tenure_count, - "Should have mock mined at least `tenure_count` nakamoto blocks. Mined = {}. Expected = {}", - blocks_mock_mined, - tenure_count, + "Should have mock mined at least `tenure_count` nakamoto blocks. Mined = {blocks_mock_mined}. Expected = {tenure_count}" ); // wait for follower to reach the chain tip @@ -9094,8 +9011,8 @@ fn utxo_check_on_startup_panic() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - println!("Nakamoto node started with config: {:?}", naka_conf); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + println!("Nakamoto node started with config: {naka_conf:?}"); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); @@ -9170,8 +9087,8 @@ fn utxo_check_on_startup_recover() { } let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - println!("Nakamoto node started with config: {:?}", naka_conf); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); + println!("Nakamoto node started with config: {naka_conf:?}"); + let prom_bind = "127.0.0.1:6000".to_string(); naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1000); @@ -9245,10 +9162,10 @@ fn v3_signer_api_endpoint() { let send_amt = 100; let send_fee = 180; conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), + PrincipalData::from(sender_addr).to_string(), send_amt + send_fee, ); - conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000); + conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); // only subscribe to the block proposal events @@ -9274,7 +9191,7 @@ fn v3_signer_api_endpoint() { let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); - let mut signers = TestSigners::new(vec![signer_sk.clone()]); + let mut signers = TestSigners::new(vec![signer_sk]); wait_for_runloop(&blocks_processed); boot_to_epoch_3( &conf, @@ -9394,118 +9311,239 @@ fn v3_signer_api_endpoint() { run_loop_thread.join().unwrap(); } +/// Test `/v3/blocks/height` API endpoint +/// +/// This endpoint returns the block blob given a height #[test] #[ignore] -/// This test spins up a nakamoto-neon node. -/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches -/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop -/// struct handles the epoch-2/3 tear-down and spin-up. 
-/// This test asserts that a long running transaction doesn't get mined, -/// but that the stacks-node continues to make progress -fn skip_mining_long_tx() { +fn v3_blockbyheight_api_endpoint() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let prom_bind = format!("{}:{}", "127.0.0.1", 6000); - naka_conf.node.prometheus_bind = Some(prom_bind.clone()); - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - naka_conf.miner.nakamoto_attempt_time_ms = 5_000; - let sender_1_sk = Secp256k1PrivateKey::from_seed(&[30]); - let sender_2_sk = Secp256k1PrivateKey::from_seed(&[31]); - // setup sender + recipient for a test stx transfer - let sender_1_addr = tests::to_addr(&sender_1_sk); - let sender_2_addr = tests::to_addr(&sender_2_sk); - let send_amt = 1000; + let (mut conf, _miner_account) = naka_neon_integration_conf(None); + let password = "12345".to_string(); + conf.connection_options.auth_token = Some(password.clone()); + conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let stacker_sk = setup_stacker(&mut conf); + let signer_sk = Secp256k1PrivateKey::new(); + let signer_addr = tests::to_addr(&signer_sk); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; let send_fee = 180; - naka_conf.add_initial_balance( - PrincipalData::from(sender_1_addr.clone()).to_string(), - send_amt * 15 + send_fee * 15, - ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_2_addr.clone()).to_string(), - 10000, - ); - let sender_signer_sk = Secp256k1PrivateKey::new(); - let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let mut signers = TestSigners::new(vec![sender_signer_sk.clone()]); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, + conf.add_initial_balance( + PrincipalData::from(sender_addr).to_string(), + send_amt + send_fee, ); - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let stacker_sk = setup_stacker(&mut naka_conf); - let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + conf.add_initial_balance(PrincipalData::from(signer_addr).to_string(), 100000); + // only subscribe to the block proposal events test_observer::spawn(); - test_observer::register_any(&mut naka_conf); + test_observer::register(&mut conf, &[EventKeyType::BlockProposal]); - let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() .expect("Failed starting bitcoind"); - let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); btc_regtest_controller.bootstrap_chain(201); - let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let mut run_loop = boot_nakamoto::BootRunLoop::new(conf.clone()).unwrap(); let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, - naka_mined_blocks: mined_naka_blocks, .. 
} = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + let mut signers = TestSigners::new(vec![signer_sk]); wait_for_runloop(&blocks_processed); boot_to_epoch_3( - &naka_conf, + &conf, &blocks_processed, &[stacker_sk], - &[sender_signer_sk], + &[signer_sk], &mut Some(&mut signers), &mut btc_regtest_controller, ); - info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + info!("------------------------- Reached Epoch 3.0 -------------------------"); - let burnchain = naka_conf.get_burnchain(); + blind_signer(&conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // Mine 1 nakamoto tenure + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + let burnchain = conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); let (chainstate, _) = StacksChainState::open( - naka_conf.is_mainnet(), - naka_conf.burnchain.chain_id, - &naka_conf.get_chainstate_path_str(), + conf.is_mainnet(), + conf.burnchain.chain_id, + &conf.get_chainstate_path_str(), None, ) .unwrap(); - info!("Nakamoto miner started..."); - blind_signer(&naka_conf, &signers, proposals_submitted); + info!("------------------------- Setup finished, run test -------------------------"); - wait_for_first_naka_block_commit(60, &commits_submitted); + let http_origin = format!("http://{}", &conf.node.rpc_bind); - // submit a long running TX and the transfer TX - let input_list: Vec<_> = (1..100u64).into_iter().map(|x| x.to_string()).collect(); - let input_list = input_list.join(" "); + let get_v3_block_by_height = |height: u64| { + let url = &format!("{http_origin}/v3/blocks/height/{height}"); + info!("Send request: GET {url}"); + reqwest::blocking::get(url).unwrap_or_else(|e| panic!("GET request failed: {e}")) + }; - // Mine a few nakamoto tenures with some interim blocks in them - for i in 0..5 { - let mined_before = mined_naka_blocks.load(Ordering::SeqCst); - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() .unwrap(); - if i == 0 { - // we trigger the nakamoto miner to evaluate the long running transaction, - // but we disable the block broadcast, so the tx doesn't end up included in a + let block_height = tip.stacks_block_height; + let block_data = get_v3_block_by_height(block_height); + + assert!(block_data.status().is_success()); + let block_bytes_vec = block_data.bytes().unwrap().to_vec(); + assert!(block_bytes_vec.len() > 0); + + // does the block id of the returned blob match? + let block_id = NakamotoBlockHeader::consensus_deserialize(&mut block_bytes_vec.as_slice()) + .unwrap() + .block_id(); + assert_eq!(block_id, tip.index_block_hash()); + + info!("------------------------- Test finished, clean up -------------------------"); + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} + +#[test] +#[ignore] +/// This test spins up a nakamoto-neon node. +/// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, and then switches +/// to Nakamoto operation (activating pox-4 by submitting a stack-stx tx). The BootLoop +/// struct handles the epoch-2/3 tear-down and spin-up.
+/// This test asserts that a long running transaction doesn't get mined, +/// but that the stacks-node continues to make progress +fn skip_mining_long_tx() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let prom_bind = "127.0.0.1:6000".to_string(); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.miner.nakamoto_attempt_time_ms = 5_000; + naka_conf.miner.tenure_cost_limit_per_block_percentage = None; + let sender_1_sk = Secp256k1PrivateKey::from_seed(&[30]); + let sender_2_sk = Secp256k1PrivateKey::from_seed(&[31]); + // setup sender + recipient for a test stx transfer + let sender_1_addr = tests::to_addr(&sender_1_sk); + let sender_2_addr = tests::to_addr(&sender_2_sk); + let send_amt = 1000; + let send_fee = 180; + naka_conf.add_initial_balance( + PrincipalData::from(sender_1_addr).to_string(), + send_amt * 15 + send_fee * 15, + ); + naka_conf.add_initial_balance(PrincipalData::from(sender_2_addr).to_string(), 10000); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let stacker_sk = setup_stacker(&mut naka_conf); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + test_observer::spawn(); + test_observer::register_any(&mut naka_conf); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + naka_mined_blocks: mined_naka_blocks, + .. 
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // submit a long running TX and the transfer TX + let input_list: Vec<_> = (1..100u64).map(|x| x.to_string()).collect(); + let input_list = input_list.join(" "); + + // Mine a few nakamoto tenures with some interim blocks in them + for i in 0..5 { + let mined_before = mined_naka_blocks.load(Ordering::SeqCst); + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + if i == 0 { + // we trigger the nakamoto miner to evaluate the long running transaction, + // but we disable the block broadcast, so the tx doesn't end up included in a // confirmed block, even though its been evaluated. // once we've seen the miner increment the mined counter, we allow it to start // broadcasting (because at this point, any future blocks produced will skip the long @@ -9594,3 +9632,480 @@ fn skip_mining_long_tx() { run_loop_thread.join().unwrap(); } + +/// Verify that a node in which there is no prepare-phase block can be recovered by +/// live-instantiating shadow tenures in the prepare phase +#[test] +#[ignore] +fn test_shadow_recovery() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(1, vec![]); + signer_test.boot_to_epoch_3(); + + let naka_conf = signer_test.running_nodes.conf.clone(); + let btc_regtest_controller = &mut signer_test.running_nodes.btc_regtest_controller; + let coord_channel = signer_test.running_nodes.coord_channel.clone(); + let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + + let burnchain = naka_conf.get_burnchain(); + + // make another tenure + next_block_and_mine_commit( + btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + let block_height = btc_regtest_controller.get_headers_height(); + let reward_cycle = btc_regtest_controller + .get_burnchain() + .block_height_to_reward_cycle(block_height) + .unwrap(); + let prepare_phase_start = btc_regtest_controller + .get_burnchain() + .pox_constants + .prepare_phase_start( + btc_regtest_controller.get_burnchain().first_block_height, + reward_cycle, + ); + + let blocks_until_next_rc = prepare_phase_start + 1 - block_height + + (btc_regtest_controller + .get_burnchain() + .pox_constants + .prepare_length as u64) + + 1; + + // kill the chain by blowing through a prepare phase + btc_regtest_controller.bootstrap_chain(blocks_until_next_rc); + let target_burn_height = btc_regtest_controller.get_headers_height(); + + let burnchain = naka_conf.get_burnchain(); + let mut sortdb = burnchain.open_sortition_db(true).unwrap(); + let (mut chainstate, _) = StacksChainState::open( + false, +
CHAIN_ID_TESTNET, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + wait_for(30, || { + let burn_height = get_chain_info(&naka_conf).burn_block_height; + if burn_height >= target_burn_height { + return Ok(true); + } + sleep_ms(500); + Ok(false) + }) + .unwrap(); + + let stacks_height_before = get_chain_info(&naka_conf).stacks_tip_height; + + // TODO: stall block processing; otherwise this test can flake + // stop block processing on the node + TEST_COORDINATOR_STALL.lock().unwrap().replace(true); + + // fix node + let shadow_blocks = shadow_chainstate_repair(&mut chainstate, &mut sortdb).unwrap(); + assert!(shadow_blocks.len() > 0); + + wait_for(30, || { + let Some(info) = get_chain_info_opt(&naka_conf) else { + sleep_ms(500); + return Ok(false); + }; + Ok(info.stacks_tip_height >= stacks_height_before) + }) + .unwrap(); + + TEST_COORDINATOR_STALL.lock().unwrap().replace(false); + info!("Beginning post-shadow tenures"); + + // revive ATC-C by waiting for commits + for _i in 0..4 { + btc_regtest_controller.bootstrap_chain(1); + sleep_ms(30_000); + } + + // make another tenure + next_block_and_mine_commit( + btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + + // all shadow blocks are present and processed + let mut shadow_ids = HashSet::new(); + for sb in shadow_blocks { + let (_, processed, orphaned, _) = chainstate + .nakamoto_blocks_db() + .get_block_processed_and_signed_weight( + &sb.header.consensus_hash, + &sb.header.block_hash(), + ) + .unwrap() + .unwrap(); + assert!(processed); + assert!(!orphaned); + shadow_ids.insert(sb.block_id()); + } + + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let mut cursor = tip.index_block_hash(); + + // the chainstate has four parts: + // * epoch 2 + // * epoch 3 prior to failure + // * shadow blocks + // * epoch 3 after recovery + // Make sure they're all there + + let mut has_epoch_3_recovery = false; + let mut has_shadow_blocks = false; + let mut has_epoch_3_failure = false; + + loop { + let header = NakamotoChainState::get_block_header(chainstate.db(), &cursor) + .unwrap() + .unwrap(); + if header.anchored_header.as_stacks_epoch2().is_some() { + break; + } + + let header = header.anchored_header.as_stacks_nakamoto().clone().unwrap(); + + if header.is_shadow_block() { + assert!(shadow_ids.contains(&header.block_id())); + } else { + assert!(!shadow_ids.contains(&header.block_id())); + } + + if !header.is_shadow_block() && !has_epoch_3_recovery { + has_epoch_3_recovery = true; + } else if header.is_shadow_block() && has_epoch_3_recovery && !has_shadow_blocks { + has_shadow_blocks = true; + } else if !header.is_shadow_block() + && has_epoch_3_recovery + && has_shadow_blocks + && !has_epoch_3_failure + { + has_epoch_3_failure = true; + } + + cursor = header.parent_block_id; + } + + assert!(has_epoch_3_recovery); + assert!(has_shadow_blocks); + assert!(has_epoch_3_failure); +} + +#[test] +#[ignore] +/// This test is testing that the clarity cost spend down works as expected, +/// spreading clarity contract calls across the tenure instead of all in the first block. +/// It also ensures that the clarity cost resets at the start of each tenure. 
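+///
+/// As a rough sketch of the spend-down being exercised here (illustrative
+/// only, not the miner's actual accounting): with
+/// `tenure_cost_limit_per_block_percentage = Some(5)`, each block may spend
+/// about 5% of whatever tenure budget is still unspent, so per-block capacity
+/// shrinks monotonically until the tenure ends and the budget resets:
+///
+/// ```text
+/// let mut remaining = 100_000u64;       // abstract tenure budget
+/// for _block in 0..5 {
+///     let budget = remaining * 5 / 100; // 5% of what is left
+///     remaining -= budget;              // later blocks see a smaller pool
+/// }
+/// ```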
+fn clarity_cost_spend_down() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + let num_signers = 30; + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sks: Vec<_> = (0..num_signers) + .map(|_| Secp256k1PrivateKey::new()) + .collect(); + let sender_signer_sks: Vec<_> = (0..num_signers) + .map(|_| Secp256k1PrivateKey::new()) + .collect(); + let sender_signer_addrs: Vec<_> = sender_signer_sks.iter().map(tests::to_addr).collect(); + let sender_addrs: Vec<_> = sender_sks.iter().map(tests::to_addr).collect(); + let deployer_sk = sender_sks[0]; + let deployer_addr = sender_addrs[0]; + let mut sender_nonces: HashMap<String, u64> = HashMap::new(); + + let get_and_increment_nonce = + |sender_sk: &Secp256k1PrivateKey, sender_nonces: &mut HashMap<String, u64>| { + let nonce = sender_nonces.get(&sender_sk.to_hex()).unwrap_or(&0); + let result = *nonce; + sender_nonces.insert(sender_sk.to_hex(), result + 1); + result + }; + let tenure_count = 5; + let nmb_txs_per_signer = 2; + let mut signers = TestSigners::new(sender_signer_sks.clone()); + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let tx_fee = 10000; + let small_deploy_fee = 190200; + let large_deploy_fee = 570200; + let amount = + (large_deploy_fee + small_deploy_fee) + tx_fee * nmb_txs_per_signer + 100 * tenure_count; + for sender_addr in sender_addrs { + naka_conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), amount); + } + for sender_signer_addr in sender_signer_addrs { + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr).to_string(), + amount * 2, + ); + } + naka_conf.miner.tenure_cost_limit_per_block_percentage = Some(5); + let stacker_sks: Vec<_> = (0..num_signers) + .map(|_| setup_stacker(&mut naka_conf)) + .collect(); + + test_observer::spawn(); + test_observer::register(&mut naka_conf, &[EventKeyType::MinedBlocks]); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + naka_mined_blocks: mined_blocks, + ..
+ } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &stacker_sks, + &sender_signer_sks, + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + let small_contract = format!( + r#" +(define-data-var my-var uint u0) +(define-public (f) (begin {} (ok 1))) (begin (f)) + "#, + (0..250) + .map(|_| format!("(var-get my-var)")) + .collect::<Vec<String>>() + .join(" ") + ); + + // Create an expensive contract that will be republished multiple times + let large_contract = format!( + "(define-public (f) (begin {} (ok 1))) (begin (f))", + (0..250) + .map(|_| format!( + "(unwrap! (contract-call? '{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))", + boot_code_id("cost-voting", false), + boot_code_id("costs", false), + boot_code_id("costs", false), + )) + .collect::<Vec<String>>() + .join(" ") + ); + + // First, let's deploy the contracts + let deployer_nonce = get_and_increment_nonce(&deployer_sk, &mut sender_nonces); + let small_contract_tx = make_contract_publish( + &deployer_sk, + deployer_nonce, + large_deploy_fee, + naka_conf.burnchain.chain_id, + "small-contract", + &small_contract, + ); + submit_tx(&http_origin, &small_contract_tx); + let deployer_nonce = get_and_increment_nonce(&deployer_sk, &mut sender_nonces); + let large_contract_tx = make_contract_publish( + &deployer_sk, + deployer_nonce, + large_deploy_fee, + naka_conf.burnchain.chain_id, + "big-contract", + &large_contract, + ); + submit_tx(&http_origin, &large_contract_tx); + + info!("----- Submitted deploy txs, mining BTC block -----"); + + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + next_block_and(&mut btc_regtest_controller, 60, || { + let blocks_count = mined_blocks.load(Ordering::SeqCst); + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_count > blocks_before && blocks_processed > blocks_processed_before) + }) + .unwrap(); + + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + let mined_before = test_observer::get_mined_nakamoto_blocks(); + let commits_before = commits_submitted.load(Ordering::SeqCst); + info!("----- Waiting for deploy txs to be mined -----"); + wait_for(30, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before + && test_observer::get_mined_nakamoto_blocks().len() > mined_before.len() + && commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .expect("Timed out waiting for interim blocks to be mined"); + + info!("----- Mining interim blocks -----"); + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + info!("Mining tenure {tenure_ix}"); + // Wait for the tenure change payload to be mined + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let blocks_processed_before = coord_channel + .lock()
.expect("Mutex poisoned") + .get_stacks_blocks_processed(); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and(&mut btc_regtest_controller, 60, || { + let blocks_count = mined_blocks.load(Ordering::SeqCst); + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_count > blocks_before + && blocks_processed > blocks_processed_before + && commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .unwrap(); + + // mine the interim blocks + let mined_before = test_observer::get_mined_nakamoto_blocks(); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + // Pause mining so we can add all our transactions to the mempool at once. + TEST_MINE_STALL.lock().unwrap().replace(true); + let mut submitted_txs = vec![]; + for _nmb_tx in 0..nmb_txs_per_signer { + for sender_sk in sender_sks.iter() { + let sender_nonce = get_and_increment_nonce(&sender_sk, &mut sender_nonces); + // Fill up the mempool with contract calls + let contract_tx = make_contract_call( + &sender_sk, + sender_nonce, + tx_fee, + naka_conf.burnchain.chain_id, + &deployer_addr, + "small-contract", + "f", + &[], + ); + match submit_tx_fallible(&http_origin, &contract_tx) { + Ok(txid) => { + submitted_txs.push(txid); + } + Err(_e) => { + // If we fail to submit a tx, we need to make sure we don't + // increment the nonce for this sender, so we don't end up + // skipping a tx. + sender_nonces.insert(sender_sk.to_hex(), sender_nonce); + } + } + } + } + TEST_MINE_STALL.lock().unwrap().replace(false); + wait_for(120, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed >= blocks_processed_before + 7) + }) + .expect("Timed out waiting for interim blocks to be mined"); + + let mined_after = test_observer::get_mined_nakamoto_blocks(); + let mined_blocks: Vec<_> = mined_after.iter().skip(mined_before.len()).collect(); + let total_nmb_txs = mined_after.iter().map(|b| b.tx_events.len()).sum::<usize>(); + let nmb_mined_blocks = mined_blocks.len(); + debug!( + "Mined a total of {total_nmb_txs} transactions across {nmb_mined_blocks} mined blocks" + ); + let mut last_tx_count = None; + for (i, block) in mined_blocks.into_iter().enumerate() { + let tx_count = block.tx_events.len(); + if let Some(count) = last_tx_count { + assert!( + tx_count <= count, + "Expected fewer txs to be mined each block. Last block: {count}, Current block: {tx_count}" + ); + }; + last_tx_count = Some(tx_count); + + // In every block except the last one, only the final transaction should hit the soft limit + for (j, tx_event) in block.tx_events.iter().enumerate() { + if let TransactionEvent::Success(TransactionSuccessEvent { + soft_limit_reached, + ..
+ }) = tx_event + { + if i == nmb_mined_blocks - 1 || j != block.tx_events.len() - 1 { + assert!( + !soft_limit_reached, + "Expected tx to not hit the soft limit in the very last block or in any txs but the last in all other blocks" + ); + } else { + assert!(soft_limit_reached, "Expected tx to hit the soft limit."); + } + } + } + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index d6373a3b44..7d9f1f0dc8 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -43,7 +43,7 @@ use stacks::cli::{self, StacksChainConfig}; use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolWalkTxTypes; use stacks::core::{ - self, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205, + self, EpochList, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205, BLOCK_LIMIT_MAINNET_21, CHAIN_ID_TESTNET, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, PEER_VERSION_EPOCH_2_5, @@ -98,7 +98,7 @@ fn inner_neon_integration_test_conf(seed: Option<Vec<u8>>) -> (Config, StacksAdd let mut conf = super::new_test_conf(); // tests can override this, but these tests run with epoch 2.05 by default - conf.burnchain.epochs = Some(vec![ + conf.burnchain.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -127,7 +127,7 @@ fn inner_neon_integration_test_conf(seed: Option<Vec<u8>>) -> (Config, StacksAdd block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]); + ])); let seed = seed.unwrap_or(conf.node.seed.clone()); conf.node.seed = seed; @@ -158,7 +158,7 @@ fn inner_neon_integration_test_conf(seed: Option<Vec<u8>>) -> (Config, StacksAdd .unwrap() .burnchain .magic_bytes; - assert_eq!(magic_bytes.as_bytes(), &['T' as u8, '2' as u8]); + assert_eq!(magic_bytes.as_bytes(), b"T2"); conf.burnchain.magic_bytes = magic_bytes; conf.burnchain.poll_time_secs = 1; conf.node.pox_sync_sample_secs = 0; @@ -371,8 +371,10 @@ pub mod test_observer { inner_obj } else if let Some(inner_obj) = txevent_obj.get("Skipped") { inner_obj + } else if let Some(inner_obj) = txevent_obj.get("Problematic") { + inner_obj } else { - panic!("TransactionEvent object should have one of Success, ProcessingError, or Skipped") + panic!("TransactionEvent object should have one of Success, ProcessingError, Skipped, or Problematic.
Had keys: {:?}", txevent_obj.keys().map(|x| x.to_string()).collect::<Vec<_>>()); }; inner_obj .as_object() @@ -391,7 +393,7 @@ pub mod test_observer { let new_rawtxs = txs .as_array() .unwrap() - .into_iter() + .iter() .map(|x| x.as_str().unwrap().to_string()); let mut memtxs = MEMTXS.lock().unwrap(); for new_tx in new_rawtxs { @@ -408,7 +410,7 @@ pub mod test_observer { .unwrap() .as_array() .unwrap() - .into_iter() + .iter() .map(|x| x.as_str().unwrap().to_string()); let reason = txs.get("reason").unwrap().as_str().unwrap().to_string(); @@ -622,8 +624,7 @@ pub mod test_observer { // Find indexes in range for which we don't have burn block in set let missing = (start..=end) - .into_iter() - .filter(|i| !burn_block_heights.contains(&i)) + .filter(|i| !burn_block_heights.contains(i)) .collect::<Vec<_>>(); if missing.is_empty() { @@ -664,9 +665,8 @@ pub fn next_block_and_wait_with_timeout( ) -> bool { let current = blocks_processed.load(Ordering::SeqCst); info!( - "Issuing block at {}, waiting for bump ({})", - get_epoch_time_secs(), - current + "Issuing block at {}, waiting for bump ({current})", - get_epoch_time_secs() ); btc_controller.build_next_block(1); let start = Instant::now(); @@ -693,9 +693,8 @@ pub fn next_block_and_iterate( ) -> bool { let current = blocks_processed.load(Ordering::SeqCst); eprintln!( - "Issuing block at {}, waiting for bump ({})", - get_epoch_time_secs(), - current + "Issuing block at {}, waiting for bump ({current})", - get_epoch_time_secs() ); btc_controller.build_next_block(1); let start = Instant::now(); @@ -725,20 +724,19 @@ pub fn run_until_burnchain_height( target_height: u64, conf: &Config, ) -> bool { - let tip_info = get_chain_info(&conf); + let tip_info = get_chain_info(conf); let mut current_height = tip_info.burn_block_height; while current_height < target_height { eprintln!( - "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({})", + "run_until_burnchain_height: Issuing block at {}, current_height burnchain height is ({current_height})", get_epoch_time_secs(), - current_height ); - let next_result = next_block_and_wait(btc_regtest_controller, &blocks_processed); + let next_result = next_block_and_wait(btc_regtest_controller, blocks_processed); if !next_result { return false; } - let Ok(tip_info) = get_chain_info_result(&conf) else { + let Ok(tip_info) = get_chain_info_result(conf) else { sleep_ms(1000); continue; }; @@ -764,15 +762,12 @@ pub fn wait_for_runloop(blocks_processed: &Arc<AtomicU64>) { pub fn wait_for_microblocks(microblocks_processed: &Arc<AtomicU64>, timeout: u64) -> bool { let mut current = microblocks_processed.load(Ordering::SeqCst); let start = Instant::now(); - info!("Waiting for next microblock (current = {})", &current); + info!("Waiting for next microblock (current = {current})"); loop { let now = microblocks_processed.load(Ordering::SeqCst); if now == 0 && current != 0 { // wrapped around -- a new epoch started - info!( - "New microblock epoch started while waiting (originally {})", - current - ); + info!("New microblock epoch started while waiting (originally {current})"); current = 0; } @@ -781,24 +776,24 @@ pub fn wait_for_microblocks(microblocks_processed: &Arc<AtomicU64>, timeout: u64 } if start.elapsed() > Duration::from_secs(timeout) { - warn!("Timed out waiting for microblocks to process ({})", timeout); + warn!("Timed out waiting for microblocks to process ({timeout})"); return false; } thread::sleep(Duration::from_millis(100)); } info!("Next microblock acknowledged"); - return true; + true } /// returns Txid string upon success -pub
fn submit_tx_fallible(http_origin: &str, tx: &Vec<u8>) -> Result<String, String> { +pub fn submit_tx_fallible(http_origin: &str, tx: &[u8]) -> Result<String, String> { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/transactions", http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") - .body(tx.clone()) + .body(tx.to_vec()) .send() .unwrap(); if res.status().is_success() { @@ -817,16 +812,16 @@ pub fn submit_tx_fallible(http_origin: &str, tx: &Vec<u8>) -> Result -pub fn submit_tx(http_origin: &str, tx: &Vec<u8>) -> String { +pub fn submit_tx(http_origin: &str, tx: &[u8]) -> String { submit_tx_fallible(http_origin, tx).unwrap_or_else(|e| { - eprintln!("Submit tx error: {}", e); + eprintln!("Submit tx error: {e}"); panic!(""); }) } pub fn get_unconfirmed_tx(http_origin: &str, txid: &Txid) -> Option<String> { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/transactions/unconfirmed/{}", http_origin, txid); + let path = format!("{http_origin}/v2/transactions/unconfirmed/{txid}"); let res = client.get(&path).send().unwrap(); if res.status().is_success() { @@ -840,14 +835,14 @@ pub fn get_unconfirmed_tx(http_origin: &str, txid: &Txid) -> Option<String> { pub fn submit_block( http_origin: &str, consensus_hash: &ConsensusHash, - block: &Vec<u8>, + block: &[u8], ) -> StacksBlockAcceptedData { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/blocks/upload/{}", http_origin, consensus_hash); + let path = format!("{http_origin}/v2/blocks/upload/{consensus_hash}"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") - .body(block.clone()) + .body(block.to_owned()) .send() .unwrap(); @@ -862,21 +857,21 @@ pub fn submit_block( .block_hash() ) ); - return res; + res } else { eprintln!("{}", res.text().unwrap()); panic!(""); } } -pub fn submit_microblock(http_origin: &str, mblock: &Vec<u8>) -> BlockHeaderHash { +pub fn submit_microblock(http_origin: &str, mblock: &[u8]) -> BlockHeaderHash { let client = reqwest::blocking::Client::new(); let microblock = StacksMicroblock::consensus_deserialize(&mut &mblock[..]).unwrap(); - let path = format!("{}/v2/microblocks/{}", http_origin, microblock.block_hash()); + let path = format!("{http_origin}/v2/microblocks/{}", microblock.block_hash()); let res = client .post(&path) .header("Content-Type", "application/octet-stream") - .body(mblock.clone()) + .body(mblock.to_owned()) .send() .unwrap(); @@ -888,7 +883,7 @@ pub fn submit_microblock(http_origin: &str, mblock: &Vec<u8>) -> BlockHeaderHash .unwrap() .block_hash() ); - return res; + res } else { eprintln!("{}", res.text().unwrap()); panic!(""); } @@ -897,7 +892,7 @@ pub fn submit_microblock(http_origin: &str, mblock: &Vec<u8>) -> BlockHeaderHash pub fn get_block(http_origin: &str, block_id: &StacksBlockId) -> Option<StacksBlock> { let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/blocks/{}", http_origin, block_id); + let path = format!("{http_origin}/v2/blocks/{block_id}"); let res = client.get(&path).send().unwrap(); if res.status().is_success() { @@ -939,7 +934,7 @@ pub fn get_tip_anchored_block(conf: &Config) -> (ConsensusHash, StacksBlock) { // get the associated anchored block let http_origin = format!("http://{}", &conf.node.rpc_bind); let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/blocks/{}", &http_origin, &stacks_id_tip); + let path = format!("{http_origin}/v2/blocks/{stacks_id_tip}"); let block_bytes = client.get(&path).send().unwrap().bytes().unwrap(); let block =
StacksBlock::consensus_deserialize(&mut block_bytes.as_ref()).unwrap(); @@ -972,10 +967,7 @@ pub fn call_read_only( info!("Call read only: {contract}.{function}({args:?})"); - let path = format!( - "{http_origin}/v2/contracts/call-read/{}/{}/{}", - principal, contract, function - ); + let path = format!("{http_origin}/v2/contracts/call-read/{principal}/{contract}/{function}"); let serialized_args = args .iter() @@ -1005,14 +997,13 @@ fn find_microblock_privkey( let mut keychain = Keychain::default(conf.node.seed.clone()); for ix in 0..max_tries { // the first rotation occurs at 203. - let privk = - keychain.make_microblock_secret_key(203 + ix, &((203 + ix) as u64).to_be_bytes()); + let privk = keychain.make_microblock_secret_key(203 + ix, &(203 + ix).to_be_bytes()); let pubkh = Hash160::from_node_public_key(&StacksPublicKey::from_private(&privk)); if pubkh == *pubkey_hash { return Some(privk); } } - return None; + None } /// Returns true iff `b` is within `0.1%` of `a`. @@ -1073,7 +1064,7 @@ fn bitcoind_integration_test() { // let's query the miner's account nonce: - eprintln!("Miner account: {}", miner_account); + eprintln!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); @@ -1088,7 +1079,7 @@ fn bitcoind_integration_test() { .filter(|block| block.get("burn_amount").unwrap().as_u64().unwrap() > 0) .collect(); assert!( - burn_blocks_with_burns.len() >= 1, + !burn_blocks_with_burns.is_empty(), "Burn block sortitions {} should be >= 1", burn_blocks_with_burns.len() ); @@ -1096,7 +1087,7 @@ fn bitcoind_integration_test() { // query for prometheus metrics #[cfg(feature = "monitoring_prom")] { - let prom_http_origin = format!("http://{}", prom_bind); + let prom_http_origin = format!("http://{prom_bind}"); let client = reqwest::blocking::Client::new(); let res = client .get(&prom_http_origin) @@ -1184,7 +1175,7 @@ fn confirm_unparsed_ongoing_ops() { bitcoin_regtest_controller::TEST_MAGIC_BYTES .lock() .unwrap() - .replace(['Z' as u8, 'Z' as u8]); + .replace([b'Z', b'Z']); // let's trigger another mining loop: this should create an invalid block commit. 
 // this bitcoin block will contain the valid commit created before (so, a second stacks block)
@@ -1209,7 +1200,7 @@ fn confirm_unparsed_ongoing_ops() {
 
     // query the miner's account nonce
-    eprintln!("Miner account: {}", miner_account);
+    eprintln!("Miner account: {miner_account}");
     let account = get_account(&http_origin, &miner_account);
     assert_eq!(account.balance, 0);
 
@@ -1305,9 +1296,9 @@ fn most_recent_utxo_integration_test() {
     let smallest_utxo = smallest_utxo.unwrap();
     let mut biggest_utxo = biggest_utxo.unwrap();
-    eprintln!("Last-spent UTXO is {:?}", &last_utxo);
-    eprintln!("Smallest UTXO is {:?}", &smallest_utxo);
-    eprintln!("Biggest UTXO is {:?}", &biggest_utxo);
+    eprintln!("Last-spent UTXO is {last_utxo:?}");
+    eprintln!("Smallest UTXO is {smallest_utxo:?}");
+    eprintln!("Biggest UTXO is {biggest_utxo:?}");
 
     assert_eq!(last_utxo, smallest_utxo);
     assert_ne!(biggest_utxo, last_utxo);
@@ -1354,9 +1345,9 @@ pub fn get_account_result<F: std::fmt::Display>(
     account: &F,
 ) -> Result<Account, reqwest::Error> {
     let client = reqwest::blocking::Client::new();
-    let path = format!("{}/v2/accounts/{}?proof=0", http_origin, account);
+    let path = format!("{http_origin}/v2/accounts/{account}?proof=0");
     let res = client.get(&path).send()?.json::<AccountEntryResponse>()?;
-    info!("Account response: {:#?}", res);
+    info!("Account response: {res:#?}");
     Ok(Account {
         balance: u128::from_str_radix(&res.balance[2..], 16).unwrap(),
         locked: u128::from_str_radix(&res.locked[2..], 16).unwrap(),
@@ -1371,19 +1362,19 @@ pub fn get_account<F: std::fmt::Display>(http_origin: &str, account: &F) -> Acco
 pub fn get_neighbors(conf: &Config) -> Option<RPCNeighborsInfo> {
     let client = reqwest::blocking::Client::new();
     let http_origin = format!("http://{}", &conf.node.rpc_bind);
-    let path = format!("{}/v2/neighbors", http_origin);
+    let path = format!("{http_origin}/v2/neighbors");
     client.get(&path).send().ok()?.json().ok()
 }
 
 pub fn get_pox_info(http_origin: &str) -> Option<RPCPoxInfoData> {
     let client = reqwest::blocking::Client::new();
-    let path = format!("{}/v2/pox", http_origin);
+    let path = format!("{http_origin}/v2/pox");
     client.get(&path).send().ok()?.json::<RPCPoxInfoData>().ok()
 }
 
 fn get_chain_tip(http_origin: &str) -> (ConsensusHash, BlockHeaderHash) {
     let client = reqwest::blocking::Client::new();
-    let path = format!("{}/v2/info", http_origin);
+    let path = format!("{http_origin}/v2/info");
     let res = client
         .get(&path)
         .send()
@@ -1404,7 +1395,7 @@ fn get_chain_tip(http_origin: &str) -> (ConsensusHash, BlockHeaderHash) {
 
 fn get_chain_tip_height(http_origin: &str) -> u64 {
     let client = reqwest::blocking::Client::new();
-    let path = format!("{}/v2/info", http_origin);
+    let path = format!("{http_origin}/v2/info");
     let res = client
         .get(&path)
         .send()
@@ -1427,10 +1418,8 @@ pub fn get_contract_src(
     } else {
         "".to_string()
     };
-    let path = format!(
-        "{}/v2/contracts/source/{}/{}{}",
-        http_origin, contract_addr, contract_name, query_string
-    );
+    let path =
+        format!("{http_origin}/v2/contracts/source/{contract_addr}/{contract_name}{query_string}");
     let res = client.get(&path).send().unwrap();
 
     if res.status().is_success() {
@@ -1719,7 +1708,7 @@ fn liquid_ustx_integration() {
     let dropped_txs = test_observer::get_memtx_drops();
     assert_eq!(dropped_txs.len(), 1);
     assert_eq!(&dropped_txs[0].1, "ReplaceByFee");
-    assert_eq!(&dropped_txs[0].0, &format!("0x{}", replaced_txid));
+    assert_eq!(&dropped_txs[0].0, &format!("0x{replaced_txid}"));
 
     // mine 1 burn block for the miner to issue the next block
     next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
@@ -1848,7 +1837,7 @@ fn lockup_integration() {
             }
         }
     }
-    assert_eq!(found, true);
+
assert!(found); // block #2 won't unlock STX next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -1882,7 +1871,7 @@ fn stx_transfer_btc_integration_test() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let _spender_btc_addr = BitcoinAddress::from_bytes_legacy( BitcoinNetworkType::Regtest, LegacyBitcoinAddressType::PublicKeyHash, @@ -1892,7 +1881,7 @@ fn stx_transfer_btc_integration_test() { let spender_2_sk = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_2_stx_addr: StacksAddress = to_addr(&spender_2_sk); - let spender_2_addr: PrincipalData = spender_2_stx_addr.clone().into(); + let spender_2_addr: PrincipalData = spender_2_stx_addr.into(); let (mut conf, _miner_account) = neon_integration_test_conf(); @@ -1948,7 +1937,7 @@ fn stx_transfer_btc_integration_test() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -1975,8 +1964,8 @@ fn stx_transfer_btc_integration_test() { let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); let transfer_stx_op = TransferStxOp { - sender: spender_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -1986,7 +1975,7 @@ fn stx_transfer_btc_integration_test() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller @@ -2017,7 +2006,7 @@ fn stx_transfer_btc_integration_test() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_2_stx_addr.clone(), + output: spender_2_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -2046,8 +2035,8 @@ fn stx_transfer_btc_integration_test() { // let's fire off our transfer op. 
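
A hedged aside on the `.clone()` removals in this test, and on the `tx.clone()` to `tx.to_vec()` change in the submit helpers earlier: the address and key types being passed around here are `Copy`, so cloning them is redundant (clippy's `clone_on_copy`), while a `&[u8]` slice parameter needs `to_vec()`/`to_owned()` to produce an owned buffer. A minimal sketch with hypothetical stand-in types:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Addr([u8; 20]); // stand-in for a `Copy` address type

    fn body(payload: &[u8]) -> Vec<u8> {
        payload.to_vec() // owned copy of a slice; replaces `payload.clone()`
    }

    fn main() {
        let sender = Addr([0u8; 20]);
        let output = sender; // plain copy; `.clone()` would be redundant
        assert_eq!(sender, output); // `sender` remains usable afterwards

        let tx = vec![1u8, 2, 3];
        assert_eq!(body(&tx), tx); // `&Vec<u8>` coerces to `&[u8]`
    }
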
let transfer_stx_op = TransferStxOp { - sender: spender_2_stx_addr.clone(), - recipient: recipient_addr.clone(), + sender: spender_2_stx_addr, + recipient: recipient_addr, transfered_ustx: 100_000, memo: vec![], // to be filled in @@ -2057,7 +2046,7 @@ fn stx_transfer_btc_integration_test() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; - let mut spender_signer = BurnchainOpSigner::new(spender_2_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_2_sk, false); btc_regtest_controller .submit_manual( @@ -2111,7 +2100,7 @@ fn stx_delegate_btc_integration_test() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let recipient_sk = StacksPrivateKey::new(); let recipient_addr = to_addr(&recipient_sk); @@ -2119,11 +2108,7 @@ fn stx_delegate_btc_integration_test() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (mut conf, _miner_account) = neon_integration_test_conf(); @@ -2132,12 +2117,12 @@ fn stx_delegate_btc_integration_test() { amount: 100300, }); conf.initial_balances.push(InitialBalance { - address: recipient_addr.clone().into(), + address: recipient_addr.into(), amount: 300, }); // update epoch info so that Epoch 2.1 takes effect - conf.burnchain.epochs = Some(vec![ + conf.burnchain.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -2159,7 +2144,7 @@ fn stx_delegate_btc_integration_test() { block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]); + ])); conf.burnchain.pox_2_activation = Some(3); test_observer::spawn(); @@ -2226,7 +2211,7 @@ fn stx_delegate_btc_integration_test() { // okay, let's send a pre-stx op. let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -2252,8 +2237,8 @@ fn stx_delegate_btc_integration_test() { // let's fire off our delegate op. 
let del_stx_op = DelegateStxOp { - sender: spender_stx_addr.clone(), - delegate_to: recipient_addr.clone(), + sender: spender_stx_addr, + delegate_to: recipient_addr, reward_addr: None, delegated_ustx: 100_000, // to be filled in @@ -2264,7 +2249,7 @@ fn stx_delegate_btc_integration_test() { until_burn_height: None, }; - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(spender_sk, false); assert!( btc_regtest_controller .submit_operation( @@ -2298,7 +2283,7 @@ fn stx_delegate_btc_integration_test() { Value::Principal(spender_addr.clone()), Value::UInt(100_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -2372,7 +2357,7 @@ fn stack_stx_burn_op_test() { let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr_1: StacksAddress = to_addr(&spender_sk_1); - let spender_addr_1: PrincipalData = spender_stx_addr_1.clone().into(); + let spender_addr_1: PrincipalData = spender_stx_addr_1.into(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); let spender_stx_addr_2: StacksAddress = to_addr(&spender_sk_2); @@ -2390,12 +2375,12 @@ fn stack_stx_burn_op_test() { amount: first_bal, }); conf.initial_balances.push(InitialBalance { - address: recipient_addr.clone().into(), + address: recipient_addr.into(), amount: second_bal, }); // update epoch info so that Epoch 2.1 takes effect - conf.burnchain.epochs = Some(vec![ + conf.burnchain.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -2445,7 +2430,7 @@ fn stack_stx_burn_op_test() { block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_5, }, - ]); + ])); conf.burnchain.pox_2_activation = Some(3); test_observer::spawn(); @@ -2506,8 +2491,8 @@ fn stack_stx_burn_op_test() { info!("Bootstrapped to 2.5, submitting stack-stx and pre-stx op..."); - let signer_sk_1 = spender_sk_1.clone(); - let signer_sk_2 = spender_sk_2.clone(); + let signer_sk_1 = spender_sk_1; + let signer_sk_2 = spender_sk_2; let signer_pk_1 = StacksPublicKey::from_private(&signer_sk_1); let pox_addr = PoxAddress::Standard(spender_stx_addr_1, Some(AddressHashMode::SerializeP2PKH)); @@ -2540,7 +2525,7 @@ fn stack_stx_burn_op_test() { let mut miner_signer_2 = Keychain::default(conf.node.seed.clone()).generate_op_signer(); let pre_stx_op_2 = PreStxOp { - output: spender_stx_addr_2.clone(), + output: spender_stx_addr_2, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -2619,13 +2604,13 @@ fn stack_stx_burn_op_test() { // `stacked_ustx` should be large enough to avoid ERR_STACKING_THRESHOLD_NOT_MET from Clarity let stack_stx_op_with_some_signer_key = BlockstackOperationType::StackStx(StackStxOp { - sender: spender_stx_addr_1.clone(), + sender: spender_stx_addr_1, reward_addr: pox_addr.clone(), stacked_ustx: 10000000000000, num_cycles: 6, signer_key: Some(signer_key), max_amount: Some(u128::MAX), - auth_id: Some(auth_id.into()), + auth_id: Some(auth_id), // to be filled in vtxindex: 0, txid: Txid([0u8; 32]), @@ -2633,7 +2618,7 @@ fn stack_stx_burn_op_test() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut spender_signer_1 = BurnchainOpSigner::new(signer_sk_1.clone(), false); + let mut spender_signer_1 = BurnchainOpSigner::new(signer_sk_1, false); assert!( btc_regtest_controller .submit_operation( @@ -2647,7 +2632,7 @@ fn stack_stx_burn_op_test() { ); 
let stack_stx_op_with_no_signer_key = BlockstackOperationType::StackStx(StackStxOp { - sender: spender_stx_addr_2.clone(), + sender: spender_stx_addr_2, reward_addr: pox_addr.clone(), stacked_ustx: 10000000000000, num_cycles: 6, @@ -2661,7 +2646,7 @@ fn stack_stx_burn_op_test() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut spender_signer_2 = BurnchainOpSigner::new(signer_sk_2.clone(), false); + let mut spender_signer_2 = BurnchainOpSigner::new(signer_sk_2, false); assert!( btc_regtest_controller .submit_operation( @@ -2740,7 +2725,7 @@ fn stack_stx_burn_op_test() { for ancestor_bhh in ancestor_burnchain_header_hashes.iter().rev() { let stacking_ops = SortitionDB::get_stack_stx_ops(sortdb_conn, ancestor_bhh).unwrap(); for stacking_op in stacking_ops.into_iter() { - debug!("Stacking op queried from sortdb: {:?}", stacking_op); + debug!("Stacking op queried from sortdb: {stacking_op:?}"); match stacking_op.signer_key { Some(_) => found_some = true, None => found_none = true, @@ -2775,17 +2760,13 @@ fn vote_for_aggregate_key_burn_op_test() { let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); + let spender_addr: PrincipalData = spender_stx_addr.into(); let pox_pubkey = Secp256k1PublicKey::from_hex( "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let _pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let _pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let (mut conf, _miner_account) = neon_integration_test_conf(); @@ -2798,7 +2779,7 @@ fn vote_for_aggregate_key_burn_op_test() { }); // update epoch info so that Epoch 2.1 takes effect - conf.burnchain.epochs = Some(vec![ + conf.burnchain.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -2848,7 +2829,7 @@ fn vote_for_aggregate_key_burn_op_test() { block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_5, }, - ]); + ])); conf.burnchain.pox_2_activation = Some(3); test_observer::spawn(); @@ -2912,7 +2893,7 @@ fn vote_for_aggregate_key_burn_op_test() { // setup stack-stx tx - let signer_sk = spender_sk.clone(); + let signer_sk = spender_sk; let signer_pk = StacksPublicKey::from_private(&signer_sk); let pox_addr = PoxAddress::Standard(spender_stx_addr, Some(AddressHashMode::SerializeP2PKH)); @@ -2959,7 +2940,7 @@ fn vote_for_aggregate_key_burn_op_test() { let mut miner_signer = Keychain::default(conf.node.seed.clone()).generate_op_signer(); let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), + output: spender_stx_addr, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -3012,7 +2993,7 @@ fn vote_for_aggregate_key_burn_op_test() { BlockstackOperationType::VoteForAggregateKey(VoteForAggregateKeyOp { signer_key, signer_index, - sender: spender_stx_addr.clone(), + sender: spender_stx_addr, round: 0, reward_cycle, aggregate_key, @@ -3023,7 +3004,7 @@ fn vote_for_aggregate_key_burn_op_test() { burn_header_hash: BurnchainHeaderHash::zero(), }); - let mut spender_signer = BurnchainOpSigner::new(signer_sk.clone(), false); + let mut spender_signer = BurnchainOpSigner::new(signer_sk, false); assert!( btc_regtest_controller .submit_operation( @@ -3251,16 +3232,16 @@ fn bitcoind_forking_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let mut sort_height 
= channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); while sort_height < 210 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // let's query the miner's account nonce: - eprintln!("Miner account: {}", miner_account); + eprintln!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); @@ -3355,17 +3336,17 @@ fn should_fix_2771() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); while sort_height < 210 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // okay, let's figure out the burn block we want to fork away. let reorg_height = 208; - warn!("Will trigger re-org at block {}", reorg_height); + warn!("Will trigger re-org at block {reorg_height}"); let burn_header_hash_to_fork = btc_regtest_controller.get_block_hash(reorg_height); btc_regtest_controller.invalidate_block(&burn_header_hash_to_fork); btc_regtest_controller.build_next_block(1); @@ -3407,10 +3388,10 @@ fn make_signed_microblock( version: rng.gen(), sequence: seq, prev_block: parent_block, - tx_merkle_root: tx_merkle_root, + tx_merkle_root, signature: MessageSignature([0u8; 65]), }, - txs: txs, + txs, }; mblock.sign(block_privk).unwrap(); mblock @@ -3574,9 +3555,8 @@ fn microblock_fork_poison_integration_test() { ); eprintln!( - "Created first microblock: {}: {:?}", - &first_microblock.block_hash(), - &first_microblock + "Created first microblock: {}: {first_microblock:?}", + &first_microblock.block_hash() ); // NOTE: this microblock conflicts because it has the same parent as the first microblock, @@ -3585,9 +3565,8 @@ fn microblock_fork_poison_integration_test() { make_signed_microblock(&privk, vec![second_unconfirmed_tx], stacks_tip, 1); eprintln!( - "Created second conflicting microblock: {}: {:?}", - &second_microblock.block_hash(), - &second_microblock + "Created second conflicting microblock: {}: {second_microblock:?}", + &second_microblock.block_hash() ); (first_microblock, second_microblock) }; @@ -3598,7 +3577,7 @@ fn microblock_fork_poison_integration_test() { .unwrap(); // post the first microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -3616,7 +3595,7 @@ fn microblock_fork_poison_integration_test() { .unwrap(); // post the second microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -3737,7 +3716,7 @@ fn microblock_integration_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // let's query the miner's account nonce: - info!("Miner account: {}", miner_account); + info!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); assert_eq!(account.nonce, 1); @@ -3854,9 +3833,8 @@ fn microblock_integration_test() { ); 
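
The `make_signed_microblock` hunk above also switches to field-init shorthand (clippy's `redundant_field_names`): when a local binding shares the struct field's name, the name is written once. A minimal illustration with a hypothetical stand-in type:

    struct HeaderLike { sequence: u16, prev: [u8; 32] } // stand-in type

    fn main() {
        let sequence = 7u16;
        let prev = [0u8; 32];
        // was: HeaderLike { sequence: sequence, prev: prev }
        let h = HeaderLike { sequence, prev };
        assert_eq!(h.sequence, 7);
        assert_eq!(h.prev, [0u8; 32]);
    }
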
eprintln!( - "Created first microblock: {}: {:?}", - &first_microblock.block_hash(), - &first_microblock + "Created first microblock: {}: {first_microblock:?}", + &first_microblock.block_hash() ); /* let second_microblock = @@ -3869,9 +3847,8 @@ fn microblock_integration_test() { 1, ); eprintln!( - "Created second microblock: {}: {:?}", - &second_microblock.block_hash(), - &second_microblock + "Created second microblock: {}: {second_microblock:?}", + &second_microblock.block_hash() ); (first_microblock, second_microblock) }; @@ -3882,7 +3859,7 @@ fn microblock_integration_test() { .unwrap(); // post the first microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -3894,7 +3871,7 @@ fn microblock_integration_test() { assert_eq!(res, format!("{}", &first_microblock.block_hash())); - eprintln!("\n\nBegin testing\nmicroblock: {:?}\n\n", &first_microblock); + eprintln!("\n\nBegin testing\nmicroblock: {first_microblock:?}\n\n"); let account = get_account(&http_origin, &spender_addr); assert_eq!(account.nonce, 1); @@ -3906,7 +3883,7 @@ fn microblock_integration_test() { .unwrap(); // post the second microblock - let path = format!("{}/v2/microblocks", &http_origin); + let path = format!("{http_origin}/v2/microblocks"); let res: String = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -4037,7 +4014,7 @@ fn microblock_integration_test() { burn_blocks_with_burns.len() ); for burn_block in burn_blocks_with_burns { - eprintln!("{}", burn_block); + eprintln!("{burn_block}"); } let mut prior = None; @@ -4090,13 +4067,11 @@ fn microblock_integration_test() { // we can query unconfirmed state from the microblock we announced let path = format!( - "{}/v2/accounts/{}?proof=0&tip={}", - &http_origin, - &spender_addr, + "{http_origin}/v2/accounts/{spender_addr}?proof=0&tip={}", &tip_info.unanchored_tip.unwrap() ); - eprintln!("{:?}", &path); + eprintln!("{path:?}"); let mut iter_count = 0; let res = loop { @@ -4107,7 +4082,7 @@ fn microblock_integration_test() { match http_resp.json::() { Ok(x) => break x, Err(e) => { - warn!("Failed to query {}; will try again. Err = {:?}", &path, e); + warn!("Failed to query {path}; will try again. 
Err = {e:?}"); iter_count += 1; assert!(iter_count < 10, "Retry limit reached querying account"); sleep_ms(1000); @@ -4116,17 +4091,14 @@ fn microblock_integration_test() { }; }; - info!("Account Response = {:#?}", res); + info!("Account Response = {res:#?}"); assert_eq!(res.nonce, 2); assert_eq!(u128::from_str_radix(&res.balance[2..], 16).unwrap(), 96300); // limited by chaining for next_nonce in 2..5 { // verify that the microblock miner can automatically pick up transactions - debug!( - "Try to send unconfirmed tx from {} to {} nonce {}", - &spender_addr, &recipient, next_nonce - ); + debug!("Try to send unconfirmed tx from {spender_addr} to {recipient} nonce {next_nonce}"); let unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( &spender_sk, next_nonce, @@ -4136,14 +4108,14 @@ fn microblock_integration_test() { 1000, ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(unconfirmed_tx_bytes.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -4153,7 +4125,7 @@ fn microblock_integration_test() { .txid() .to_string() ); - eprintln!("Sent {}", &res); + eprintln!("Sent {res}"); } else { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -4171,15 +4143,13 @@ fn microblock_integration_test() { // we can query _new_ unconfirmed state from the microblock we announced let path = format!( - "{}/v2/accounts/{}?proof=0&tip={}", - &http_origin, - &spender_addr, + "{http_origin}/v2/accounts/{spender_addr}?proof=0&tip={}", &tip_info.unanchored_tip.unwrap() ); let res_text = client.get(&path).send().unwrap().text().unwrap(); - eprintln!("text of {}\n{}", &path, &res_text); + eprintln!("text of {path}\n{res_text}"); let res = client .get(&path) @@ -4187,8 +4157,8 @@ fn microblock_integration_test() { .unwrap() .json::() .unwrap(); - eprintln!("{:?}", &path); - eprintln!("{:#?}", res); + eprintln!("{path:?}"); + eprintln!("{res:#?}"); // advanced! assert_eq!(res.nonce, next_nonce + 1); @@ -4209,10 +4179,7 @@ fn filter_low_fee_tx_integration_test() { return; } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, _) = neon_integration_test_conf(); @@ -4232,7 +4199,7 @@ fn filter_low_fee_tx_integration_test() { if ix < 5 { // low-fee make_stacks_transfer( - &spender_sk, + spender_sk, 0, 1000 + (ix as u64), conf.burnchain.chain_id, @@ -4242,7 +4209,7 @@ fn filter_low_fee_tx_integration_test() { } else { // high-fee make_stacks_transfer( - &spender_sk, + spender_sk, 0, 2000 + (ix as u64), conf.burnchain.chain_id, @@ -4296,14 +4263,9 @@ fn filter_low_fee_tx_integration_test() { // First five accounts have a transaction. The miner will consider low fee transactions, // but rank by estimated fee rate. 
- for i in 0..5 { - let account = get_account(&http_origin, &spender_addrs[i]); - assert_eq!(account.nonce, 1); - } - - // last five accounts have transaction - for i in 5..10 { - let account = get_account(&http_origin, &spender_addrs[i]); + // Last five accounts have transaction + for spender_addr in &spender_addrs { + let account = get_account(&http_origin, spender_addr); assert_eq!(account.nonce, 1); } @@ -4317,10 +4279,7 @@ fn filter_long_runtime_tx_integration_test() { return; } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, _) = neon_integration_test_conf(); @@ -4341,7 +4300,7 @@ fn filter_long_runtime_tx_integration_test() { .map(|(ix, spender_sk)| { let recipient = StacksAddress::from_string(ADDR_4).unwrap(); make_stacks_transfer( - &spender_sk, + spender_sk, 0, 1000 + (ix as u64), conf.burnchain.chain_id, @@ -4393,8 +4352,8 @@ fn filter_long_runtime_tx_integration_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); // no transactions mined - for i in 0..10 { - let account = get_account(&http_origin, &spender_addrs[i]); + for spender_addr in &spender_addrs { + let account = get_account(&http_origin, &spender_addr); assert_eq!(account.nonce, 0); } @@ -4517,10 +4476,7 @@ fn size_check_integration_test() { giant_contract.push(' '); } - let spender_sks: Vec<_> = (0..10) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..10).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4652,13 +4608,10 @@ fn size_check_integration_test() { panic!("Spender address nonce incremented past 1"); } - debug!("Spender {},{}: {:?}", ix, &spender_addr, &res); + debug!("Spender {ix},{spender_addr}: {res:?}"); } - eprintln!( - "anchor_block_txs: {}, micro_block_txs: {}", - anchor_block_txs, micro_block_txs - ); + eprintln!("anchor_block_txs: {anchor_block_txs}, micro_block_txs: {micro_block_txs}"); if anchor_block_txs >= 2 && micro_block_txs >= 2 { break; @@ -4693,10 +4646,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..5) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..5).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4723,7 +4673,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { i as u64, 1100000, conf.burnchain.chain_id, - &format!("small-{}", i), + &format!("small-{i}"), &small_contract, ); ret.push(tx); @@ -4849,10 +4799,10 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..) 
= parsed.payload { - if tsc.name.to_string().find("large-").is_some() { + if tsc.name.to_string().contains("large-") { num_big_anchored_txs += 1; total_big_txs_per_block += 1; - } else if tsc.name.to_string().find("small").is_some() { + } else if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_per_microblock += 1; } @@ -4868,8 +4818,7 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { } eprintln!( - "max_big_txs_per_microblock: {}, max_big_txs_per_block: {}, total_big_txs_per_block: {}, total_big_txs_per_microblock: {}", - max_big_txs_per_microblock, max_big_txs_per_block, total_big_txs_per_block, total_big_txs_per_microblock + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, max_big_txs_per_block: {max_big_txs_per_block}, total_big_txs_per_block: {total_big_txs_per_block}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" ); assert!(max_big_txs_per_block > 0); @@ -4902,10 +4851,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..20) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..20).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4930,15 +4876,14 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { let txs: Vec<_> = spender_sks .iter() .map(|spender_sk| { - let tx = make_contract_publish_microblock_only( + make_contract_publish_microblock_only( spender_sk, 0, 600000, conf.burnchain.chain_id, "small", &small_contract, - ); - tx + ) }) .collect(); @@ -5049,7 +4994,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..) 
= parsed.payload { - if tsc.name.to_string().find("small").is_some() { + if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_per_microblock += 1; } @@ -5061,8 +5006,7 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { } eprintln!( - "max_big_txs_per_microblock: {}, total_big_txs_per_microblock: {}", - max_big_txs_per_microblock, total_big_txs_per_microblock + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" ); assert_eq!(max_big_txs_per_microblock, 5); @@ -5090,10 +5034,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { small_contract.push(' '); } - let spender_sks: Vec<_> = (0..25) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..25).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -5115,20 +5056,19 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { let txs: Vec> = spender_sks .iter() .map(|spender_sk| { - let tx = make_contract_publish_microblock_only( + make_contract_publish_microblock_only( spender_sk, 0, 1149230, conf.burnchain.chain_id, "small", &small_contract, - ); - tx + ) }) .collect(); - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].block_limit = core::BLOCK_LIMIT_MAINNET_20; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].block_limit = core::BLOCK_LIMIT_MAINNET_20; conf.burnchain.epochs = Some(epochs); conf.miner.first_attempt_time_ms = i64::MAX as u64; @@ -5222,7 +5162,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); if let TransactionPayload::SmartContract(tsc, ..) 
= parsed.payload { - if tsc.name.to_string().find("small").is_some() { + if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_per_microblock += 1; } @@ -5234,8 +5174,7 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { } eprintln!( - "max_big_txs_per_microblock: {}, total_big_txs_per_microblock: {}", - max_big_txs_per_microblock, total_big_txs_per_microblock + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, total_big_txs_per_microblock: {total_big_txs_per_microblock}" ); assert_eq!(max_big_txs_per_microblock, 3); @@ -5252,13 +5191,9 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { return; } - let spender_sks: Vec<_> = (0..4) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let spender_sks: Vec<_> = (0..4).map(|_| StacksPrivateKey::new()).collect(); let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - let spender_addrs_c32: Vec = - spender_sks.iter().map(|x| to_addr(x).into()).collect(); + let spender_addrs_c32: Vec = spender_sks.iter().map(to_addr).collect(); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -5277,8 +5212,8 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); - epochs[1].block_limit = core::BLOCK_LIMIT_MAINNET_20; + let mut epochs = EpochList::new(&*core::STACKS_EPOCHS_REGTEST); + epochs[StacksEpochId::Epoch20].block_limit = core::BLOCK_LIMIT_MAINNET_20; conf.burnchain.epochs = Some(epochs); let txs: Vec> = spender_sks @@ -5292,7 +5227,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { 0, 1049230, conf.burnchain.chain_id, - &format!("large-{}", ix), + &format!("large-{ix}"), &format!(" ;; a single one of these transactions consumes over half the runtime budget (define-constant BUFF_TO_BYTE (list @@ -5334,9 +5269,9 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { ) ) (begin - (crash-me \"{}\")) + (crash-me \"large-contract-{}-{ix}\")) ", - &format!("large-contract-{}-{}", &spender_addrs_c32[ix], &ix) + &spender_addrs_c32[ix] ) )] } else { @@ -5347,7 +5282,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { i as u64, 210000, conf.burnchain.chain_id, - &format!("small-{}-{}", ix, i), + &format!("small-{ix}-{i}"), &format!(" ;; a single one of these transactions consumes over half the runtime budget (define-constant BUFF_TO_BYTE (list @@ -5389,8 +5324,8 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { ) ) (begin - (crash-me \"{}\")) - ", &format!("small-contract-{}-{}-{}", &spender_addrs_c32[ix], &ix, i)) + (crash-me \"small-contract-{}-{ix}-{i}\")) + ", spender_addrs_c32[ix]) ); ret.push(tx); } @@ -5486,7 +5421,7 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { let mut total_big_txs_in_microblocks = 0; for block in blocks { - eprintln!("block {:?}", &block); + eprintln!("block {block:?}"); let transactions = block.get("transactions").unwrap().as_array().unwrap(); let mut num_big_anchored_txs = 0; @@ -5499,12 +5434,12 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - eprintln!("tx: {:?}", &parsed); + eprintln!("tx: {parsed:?}"); if let TransactionPayload::SmartContract(tsc, ..) 
= parsed.payload { - if tsc.name.to_string().find("large-").is_some() { + if tsc.name.to_string().contains("large-") { num_big_anchored_txs += 1; total_big_txs_in_blocks += 1; - } else if tsc.name.to_string().find("small").is_some() { + } else if tsc.name.to_string().contains("small") { num_big_microblock_txs += 1; total_big_txs_in_microblocks += 1; } @@ -5520,12 +5455,10 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { } info!( - "max_big_txs_per_microblock: {}, max_big_txs_per_block: {}", - max_big_txs_per_microblock, max_big_txs_per_block + "max_big_txs_per_microblock: {max_big_txs_per_microblock}, max_big_txs_per_block: {max_big_txs_per_block}" ); info!( - "total_big_txs_in_microblocks: {}, total_big_txs_in_blocks: {}", - total_big_txs_in_microblocks, total_big_txs_in_blocks + "total_big_txs_in_microblocks: {total_big_txs_in_microblocks}, total_big_txs_in_blocks: {total_big_txs_in_blocks}" ); // at most one big tx per block and at most one big tx per stream, always. @@ -5605,7 +5538,7 @@ fn block_replay_integration_test() { // let's query the miner's account nonce: - info!("Miner account: {}", miner_account); + info!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); assert_eq!(account.nonce, 1); @@ -5638,7 +5571,7 @@ fn block_replay_integration_test() { tip_block.consensus_serialize(&mut tip_block_bytes).unwrap(); for i in 0..1024 { - let path = format!("{}/v2/blocks/upload/{}", &http_origin, &tip_consensus_hash); + let path = format!("{http_origin}/v2/blocks/upload/{tip_consensus_hash}"); let res_text = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -5648,7 +5581,7 @@ fn block_replay_integration_test() { .text() .unwrap(); - eprintln!("{}: text of {}\n{}", i, &path, &res_text); + eprintln!("{i}: text of {path}\n{res_text}"); } test_observer::clear(); @@ -6022,11 +5955,11 @@ fn mining_events_integration_test() { let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10000000, }); conf.initial_balances.push(InitialBalance { - address: addr_2.clone().into(), + address: addr_2.into(), amount: 10000000, }); @@ -6121,7 +6054,7 @@ fn mining_events_integration_test() { // check mined microblock events let mined_microblock_events = test_observer::get_mined_microblocks(); - assert!(mined_microblock_events.len() >= 1); + assert!(!mined_microblock_events.is_empty()); // check tx events in the first microblock // 1 success: 1 contract publish, 2 error (on chain transactions) @@ -6136,15 +6069,12 @@ fn mining_events_integration_test() { execution_cost, .. }) => { - assert_eq!( - result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap(), - true - ); + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); assert_eq!(fee, &620000); assert_eq!( execution_cost, @@ -6176,15 +6106,12 @@ fn mining_events_integration_test() { txid.to_string(), "3e04ada5426332bfef446ba0a06d124aace4ade5c11840f541bf88e2e919faf6" ); - assert_eq!( - result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap(), - true - ); + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); } _ => panic!("unexpected event type"), } @@ -6197,15 +6124,12 @@ fn mining_events_integration_test() { execution_cost, .. 
}) => { - assert_eq!( - result - .clone() - .expect_result_ok() - .unwrap() - .expect_bool() - .unwrap(), - true - ); + assert!(result + .clone() + .expect_result_ok() + .unwrap() + .expect_bool() + .unwrap()); assert_eq!(fee, &600000); assert_eq!( execution_cost, @@ -6304,7 +6228,7 @@ fn block_limit_hit_integration_test() { let (mut conf, _miner_account) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10_000_000, }); conf.initial_balances.push(InitialBalance { @@ -6432,8 +6356,8 @@ fn block_limit_hit_integration_test() { assert_eq!(tx_third_block.len(), 3); let txid_1_exp = tx_third_block[1].get("txid").unwrap().as_str().unwrap(); let txid_4_exp = tx_third_block[2].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_1), txid_1_exp); - assert_eq!(format!("0x{}", txid_4), txid_4_exp); + assert_eq!(format!("0x{txid_1}"), txid_1_exp); + assert_eq!(format!("0x{txid_4}"), txid_4_exp); let tx_fourth_block = mined_block_events[4] .get("transactions") @@ -6443,8 +6367,8 @@ fn block_limit_hit_integration_test() { assert_eq!(tx_fourth_block.len(), 3); let txid_2_exp = tx_fourth_block[1].get("txid").unwrap().as_str().unwrap(); let txid_3_exp = tx_fourth_block[2].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_2), txid_2_exp); - assert_eq!(format!("0x{}", txid_3), txid_3_exp); + assert_eq!(format!("0x{txid_2}"), txid_2_exp); + assert_eq!(format!("0x{txid_3}"), txid_3_exp); test_observer::clear(); channel.stop_chains_coordinator(); @@ -6516,7 +6440,7 @@ fn microblock_limit_hit_integration_test() { let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10_000_000, }); conf.initial_balances.push(InitialBalance { @@ -6540,7 +6464,7 @@ fn microblock_limit_hit_integration_test() { conf.miner.first_attempt_time_ms = i64::MAX as u64; conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; - conf.burnchain.epochs = Some(vec![ + conf.burnchain.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -6575,7 +6499,7 @@ fn microblock_limit_hit_integration_test() { block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]); + ])); conf.burnchain.pox_2_activation = Some(10_003); // included in the first block @@ -6656,10 +6580,7 @@ fn microblock_limit_hit_integration_test() { let txid_3 = submit_tx(&http_origin, &tx_3); let txid_4 = submit_tx(&http_origin, &tx_4); - eprintln!( - "transactions: {},{},{},{}", - &txid_1, &txid_2, &txid_3, &txid_4 - ); + eprintln!("transactions: {txid_1},{txid_2},{txid_3},{txid_4}"); sleep_ms(50_000); @@ -6702,8 +6623,8 @@ fn microblock_limit_hit_integration_test() { assert_eq!(tx_first_mblock.len(), 2); let txid_1_exp = tx_first_mblock[0].get("txid").unwrap().as_str().unwrap(); let txid_4_exp = tx_first_mblock[1].get("txid").unwrap().as_str().unwrap(); - assert_eq!(format!("0x{}", txid_1), txid_1_exp); - assert_eq!(format!("0x{}", txid_4), txid_4_exp); + assert_eq!(format!("0x{txid_1}"), txid_1_exp); + assert_eq!(format!("0x{txid_4}"), txid_4_exp); let tx_second_mblock = mined_mblock_events[1] .get("transactions") @@ -6713,8 +6634,8 @@ fn microblock_limit_hit_integration_test() { assert_eq!(tx_second_mblock.len(), 2); let txid_2_exp = tx_second_mblock[0].get("txid").unwrap().as_str().unwrap(); let txid_3_exp = tx_second_mblock[1].get("txid").unwrap().as_str().unwrap(); - 
assert_eq!(format!("0x{}", txid_2), txid_2_exp); - assert_eq!(format!("0x{}", txid_3), txid_3_exp); + assert_eq!(format!("0x{txid_2}"), txid_2_exp); + assert_eq!(format!("0x{txid_3}"), txid_3_exp); test_observer::clear(); channel.stop_chains_coordinator(); @@ -6761,7 +6682,7 @@ fn block_large_tx_integration_test() { test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { - address: spender_addr.clone().into(), + address: spender_addr.into(), amount: 10000000, }); @@ -6837,10 +6758,7 @@ fn block_large_tx_integration_test() { let normal_txid = submit_tx(&http_origin, &tx); let huge_txid = submit_tx(&http_origin, &tx_2); - eprintln!( - "Try to mine a too-big tx. Normal = {}, TooBig = {}", - &normal_txid, &huge_txid - ); + eprintln!("Try to mine a too-big tx. Normal = {normal_txid}, TooBig = {huge_txid}"); next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 1200); eprintln!("Finished trying to mine a too-big tx"); @@ -6848,7 +6766,7 @@ fn block_large_tx_integration_test() { let dropped_txs = test_observer::get_memtx_drops(); assert_eq!(dropped_txs.len(), 1); assert_eq!(&dropped_txs[0].1, "TooExpensive"); - assert_eq!(&dropped_txs[0].0, &format!("0x{}", huge_txid)); + assert_eq!(&dropped_txs[0].0, &format!("0x{huge_txid}")); test_observer::clear(); channel.stop_chains_coordinator(); @@ -6898,7 +6816,7 @@ fn microblock_large_tx_integration_test_FLAKY() { test_observer::register_any(&mut conf); conf.initial_balances.push(InitialBalance { - address: addr.clone().into(), + address: addr.into(), amount: 10000000, }); @@ -6981,7 +6899,7 @@ fn microblock_large_tx_integration_test_FLAKY() { // Check that the microblock contains the first tx. let microblock_events = test_observer::get_microblocks(); - assert!(microblock_events.len() >= 1); + assert!(!microblock_events.is_empty()); let microblock = microblock_events[0].clone(); let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); @@ -6994,7 +6912,7 @@ fn microblock_large_tx_integration_test_FLAKY() { let dropped_txs = test_observer::get_memtx_drops(); assert_eq!(dropped_txs.len(), 1); assert_eq!(&dropped_txs[0].1, "TooExpensive"); - assert_eq!(&dropped_txs[0].0, &format!("0x{}", huge_txid)); + assert_eq!(&dropped_txs[0].0, &format!("0x{huge_txid}")); test_observer::clear(); channel.stop_chains_coordinator(); @@ -7020,18 +6938,10 @@ fn pox_integration_test() { "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", ) .unwrap(); - let pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_pubkey).to_bytes()); let pox_2_pubkey = Secp256k1PublicKey::from_private(&StacksPrivateKey::new()); - let pox_2_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_2_pubkey) - .to_bytes() - .to_vec(), - ); + let pox_2_pubkey_hash = bytes_to_hex(&Hash160::from_node_public_key(&pox_2_pubkey).to_bytes()); let pox_2_address = BitcoinAddress::from_bytes_legacy( BitcoinNetworkType::Testnet, @@ -7145,15 +7055,12 @@ fn pox_integration_test() { let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 
125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 0); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 0); assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 210); @@ -7191,7 +7098,7 @@ fn pox_integration_test() { &[ Value::UInt(stacked_bal), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -7205,14 +7112,14 @@ fn pox_integration_test() { submit_tx(&http_origin, &tx); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); test_observer::clear(); // now let's mine until the next reward cycle starts ... while sort_height < ((14 * pox_constants.reward_cycle_length) + 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); @@ -7220,16 +7127,13 @@ fn pox_integration_test() { .block_height_to_reward_cycle(sort_height) .expect("Expected to be able to get reward cycle"); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); assert!(pox_info.pox_activation_threshold_ustx > 1500000000000000); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 1000000000000000); assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); @@ -7281,8 +7185,7 @@ fn pox_integration_test() { // 14, and goes for 6 blocks, so we unlock in reward cycle 20, which with a reward // cycle length of 15 blocks, is a burnchain height of 300) assert_eq!(parsed.to_string(), - format!("(ok (tuple (lock-amount u1000000000000000) (stacker {}) (unlock-burn-height u300)))", - &spender_addr)); + format!("(ok (tuple (lock-amount u1000000000000000) (stacker {spender_addr}) (unlock-burn-height u300)))")); tested = true; } } @@ -7307,7 +7210,7 @@ fn pox_integration_test() { &[ Value::UInt(stacked_bal / 2), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_2_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -7331,7 +7234,7 @@ fn pox_integration_test() { &[ Value::UInt(stacked_bal / 2), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_2_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_2_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -7348,20 +7251,17 @@ fn pox_integration_test() { while sort_height < ((15 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + 
eprintln!("Sort height: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.next_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 1000000000000000); - assert_eq!(pox_info.current_cycle.is_pox_active, false); + assert!(!pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.stacked_ustx, 2000000000000000); assert_eq!(pox_info.reward_slots as u32, pox_constants.reward_slots()); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 225); @@ -7403,19 +7303,16 @@ fn pox_integration_test() { while sort_height < ((16 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); - assert_eq!( - &pox_info.contract_id, - &format!("ST000000000000000000002AMW42H.pox") - ); + assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); assert_eq!(pox_info.first_burnchain_block_height, 0); assert_eq!(pox_info.current_cycle.min_threshold_ustx, 125080000000000); assert_eq!(pox_info.current_cycle.stacked_ustx, 2000000000000000); - assert_eq!(pox_info.current_cycle.is_pox_active, true); + assert!(pox_info.current_cycle.is_pox_active); assert_eq!(pox_info.next_cycle.reward_phase_start_block_height, 240); assert_eq!(pox_info.next_cycle.prepare_phase_start_block_height, 235); assert_eq!(pox_info.next_cycle.blocks_until_prepare_phase, -4); @@ -7472,11 +7369,11 @@ fn pox_integration_test() { assert_eq!(recipient_slots.len(), 2); assert_eq!( - recipient_slots.get(&format!("{}", &pox_2_address)).cloned(), + recipient_slots.get(&format!("{pox_2_address}")).cloned(), Some(7u64) ); assert_eq!( - recipient_slots.get(&format!("{}", &pox_1_address)).cloned(), + recipient_slots.get(&format!("{pox_1_address}")).cloned(), Some(7u64) ); @@ -7490,7 +7387,7 @@ fn pox_integration_test() { while sort_height < ((17 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // get the canonical chain tip @@ -7513,7 +7410,7 @@ fn pox_integration_test() { while sort_height < ((18 * pox_constants.reward_cycle_length) - 1).into() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let utxos = btc_regtest_controller.get_all_utxos(&pox_2_pubkey); @@ -7661,7 +7558,7 @@ fn atlas_integration_test() { // (stx-to-burn uint)) let namespace = "passport"; let salt = "some-salt"; - let salted_namespace = format!("{}{}", namespace, salt); + let salted_namespace = format!("{namespace}{salt}"); let hashed_namespace = Hash160::from_data(salted_namespace.as_bytes()); let tx_1 = make_contract_call( &user_1, @@ -7677,14 +7574,14 @@ fn atlas_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = 
format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_1.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -7759,14 +7656,14 @@ fn atlas_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_2.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -7810,14 +7707,14 @@ fn atlas_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -7830,7 +7727,7 @@ fn atlas_integration_test() { while sort_height < few_blocks { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // Then check that the follower is correctly replicating the attachment @@ -7852,27 +7749,23 @@ fn atlas_integration_test() { while sort_height < few_blocks { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // Poll GET v2/attachments/ for i in 1..10 { let mut attachments_did_sync = false; let mut timeout = 60; - while attachments_did_sync != true { - let zonefile_hex = hex_bytes(&format!("facade0{}", i)).unwrap(); + while !attachments_did_sync { + let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap(); let hashed_zonefile = Hash160::from_data(&zonefile_hex); - let path = format!( - "{}/v2/attachments/{}", - &http_origin, - hashed_zonefile.to_hex() - ); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); let res = client .get(&path) .header("Content-Type", "application/json") .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let attachment_response: GetAttachmentResponse = res.json().unwrap(); assert_eq!(attachment_response.attachment.content, zonefile_hex); @@ -7944,20 +7837,16 @@ fn atlas_integration_test() { // Now wait for the node to sync the attachment let mut attachments_did_sync = false; let mut timeout = 60; - while attachments_did_sync != true { + while !attachments_did_sync { let zonefile_hex = "facade00"; let hashed_zonefile = Hash160::from_data(&hex_bytes(zonefile_hex).unwrap()); - let path = format!( - "{}/v2/attachments/{}", - &http_origin, - hashed_zonefile.to_hex() - ); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); let res = client .get(&path) .header("Content-Type", "application/json") .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { eprintln!("Success syncing attachment - {}", res.text().unwrap()); attachments_did_sync = true; @@ -7966,7 +7855,7 @@ fn atlas_integration_test() { if timeout == 0 
{ panic!("Failed syncing 1 attachments between 2 neon runloops within 60s - Something is wrong"); } - eprintln!("Attachment {} not sync'd yet", zonefile_hex); + eprintln!("Attachment {zonefile_hex} not sync'd yet"); thread::sleep(Duration::from_millis(1000)); } } @@ -7980,9 +7869,9 @@ fn atlas_integration_test() { let namespace = "passport"; for i in 1..10 { let user = StacksPrivateKey::new(); - let zonefile_hex = format!("facade0{}", i); + let zonefile_hex = format!("facade0{i}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - let name = format!("johndoe{}", i); + let name = format!("johndoe{i}"); let tx = make_contract_call( &user_1, 2 + i, @@ -8007,14 +7896,14 @@ fn atlas_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8040,20 +7929,16 @@ fn atlas_integration_test() { for i in 1..10 { let mut attachments_did_sync = false; let mut timeout = 60; - while attachments_did_sync != true { - let zonefile_hex = hex_bytes(&format!("facade0{}", i)).unwrap(); + while !attachments_did_sync { + let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap(); let hashed_zonefile = Hash160::from_data(&zonefile_hex); - let path = format!( - "{}/v2/attachments/{}", - &http_origin, - hashed_zonefile.to_hex() - ); + let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); let res = client .get(&path) .header("Content-Type", "application/json") .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let attachment_response: GetAttachmentResponse = res.json().unwrap(); assert_eq!(attachment_response.attachment.content, zonefile_hex); @@ -8072,7 +7957,7 @@ fn atlas_integration_test() { // Ensure that we the attached sidecar was able to receive a total of 10 attachments // This last assertion is flacky for some reason, it does not worth bullying the CI or disabling this whole test // We're using an inequality as a best effort, to make sure that **some** attachments were received. - assert!(test_observer::get_attachments().len() > 0); + assert!(!test_observer::get_attachments().is_empty()); test_observer::clear(); channel.stop_chains_coordinator(); @@ -8122,8 +8007,8 @@ fn antientropy_integration_test() { // Prepare the config of the follower node let (mut conf_follower_node, _) = neon_integration_test_conf(); let bootstrap_node_url = format!( - "{}@{}", - bootstrap_node_public_key, conf_bootstrap_node.node.p2p_bind + "{bootstrap_node_public_key}@{}", + conf_bootstrap_node.node.p2p_bind ); conf_follower_node.connection_options.disable_block_download = true; conf_follower_node.node.set_bootstrap_nodes( @@ -8195,10 +8080,10 @@ fn antientropy_integration_test() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); for i in 0..(target_height - 3) { - eprintln!("Mine block {}", i); + eprintln!("Mine block {i}"); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // Let's setup the follower now. 
@@ -8214,11 +8099,11 @@ fn antientropy_integration_test() {
            println!("Follower has finished");
        }
        Ok(x) => {
-            println!("Follower gave a bad signal: {:?}", &x);
+            println!("Follower gave a bad signal: {x:?}");
            panic!();
        }
        Err(e) => {
-            println!("Failed to recv: {:?}", &e);
+            println!("Failed to recv: {e:?}");
            panic!();
        }
    };
@@ -8255,8 +8140,7 @@ fn antientropy_integration_test() {
    let mut sort_height = channel.get_sortitions_processed();
    while sort_height < (target_height + 200) as u64 {
        eprintln!(
-            "Follower sortition is {}, target is {}",
-            sort_height,
+            "Follower sortition is {sort_height}, target is {}",
            target_height + 200
        );
        wait_for_runloop(&blocks_processed);
@@ -8269,8 +8153,7 @@ fn antientropy_integration_test() {
    // wait for block height to reach target
    let mut tip_height = get_chain_tip_height(&http_origin);
    eprintln!(
-        "Follower Stacks tip height is {}, wait until {} >= {} - 3",
-        tip_height, tip_height, target_height
+        "Follower Stacks tip height is {tip_height}, wait until {tip_height} >= {target_height} - 3"
    );
    let btc_regtest_controller = BitcoinRegtestController::with_burnchain(
@@ -8285,7 +8168,7 @@ fn antientropy_integration_test() {
        sleep_ms(1000);
        tip_height = get_chain_tip_height(&http_origin);
-        eprintln!("Follower Stacks tip height is {}", tip_height);
+        eprintln!("Follower Stacks tip height is {tip_height}");
        if burnchain_deadline < get_epoch_time_secs() {
            burnchain_deadline = get_epoch_time_secs() + 60;
@@ -8304,12 +8187,13 @@ fn antientropy_integration_test() {
    channel.stop_chains_coordinator();
}
+#[allow(clippy::too_many_arguments)]
fn wait_for_mined(
    btc_regtest_controller: &mut BitcoinRegtestController,
    blocks_processed: &Arc<AtomicU64>,
    http_origin: &str,
    users: &[StacksPrivateKey],
-    account_before_nonces: &Vec<u64>,
+    account_before_nonces: &[u64],
    batch_size: usize,
    batches: usize,
    index_block_hashes: &mut Vec<StacksBlockId>,
@@ -8318,7 +8202,7 @@ fn wait_for_mined(
    let mut account_after_nonces = vec![0; batches * batch_size];
    let mut all_mined = false;
    for _k in 0..10 {
-        next_block_and_wait(btc_regtest_controller, &blocks_processed);
+        next_block_and_wait(btc_regtest_controller, blocks_processed);
        sleep_ms(10_000);
        let (ch, bhh) = get_chain_tip(http_origin);
@@ -8327,29 +8211,28 @@ fn wait_for_mined(
        if let Some(last_ibh) = index_block_hashes.last() {
            if *last_ibh != ibh {
                index_block_hashes.push(ibh);
-                eprintln!("Tip is now {}", &ibh);
+                eprintln!("Tip is now {ibh}");
            }
        }
        for j in 0..batches * batch_size {
-            let account_after = get_account(&http_origin, &to_addr(&users[j]));
+            let account_after = get_account(http_origin, &to_addr(&users[j]));
            let account_after_nonce = account_after.nonce;
            account_after_nonces[j] = account_after_nonce;
-            if account_before_nonces[j] + 1 <= account_after_nonce {
+            if account_before_nonces[j] < account_after_nonce {
                all_mined_vec[j] = true;
            }
        }
-        all_mined = all_mined_vec.iter().fold(true, |acc, elem| acc && *elem);
+        all_mined = all_mined_vec.iter().all(|elem| *elem);
        if all_mined {
            break;
        }
    }
    if !all_mined {
        eprintln!(
-            "Failed to mine all transactions: nonces = {:?}, expected {:?} + {}",
-            &account_after_nonces, account_before_nonces, batch_size
+            "Failed to mine all transactions: nonces = {account_after_nonces:?}, expected {account_before_nonces:?} + {batch_size}"
        );
        panic!();
    }
@@ -8450,7 +8333,7 @@ fn atlas_stress_integration_test() {
    //    (stx-to-burn uint))
    let namespace = "passport";
    let salt = "some-salt";
-    let salted_namespace = format!("{}{}", namespace, salt);
+    let salted_namespace = format!("{namespace}{salt}");
    let hashed_namespace =
Hash160::from_data(salted_namespace.as_bytes()); let tx_1 = make_contract_call( &user_1, @@ -8466,14 +8349,14 @@ fn atlas_stress_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_1.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if res.status().is_success() { let res: String = res.json().unwrap(); assert_eq!( @@ -8548,7 +8431,7 @@ fn atlas_stress_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -8626,14 +8509,14 @@ fn atlas_stress_integration_test() { serde_json::to_vec(&json!(content)).unwrap() }; - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/json") .body(body) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8660,8 +8543,7 @@ fn atlas_stress_integration_test() { } if !all_mined { eprintln!( - "Failed to mine all transactions: nonce = {}, expected {}", - account_after_nonce, + "Failed to mine all transactions: nonce = {account_after_nonce}, expected {}", account_before.nonce + (batch_size as u64) ); panic!(); @@ -8682,14 +8564,14 @@ fn atlas_stress_integration_test() { &[Value::buff_from(namespace.as_bytes().to_vec()).unwrap()], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") .body(tx_4.clone()) .send() .unwrap(); - eprintln!("{:#?}", res); + eprintln!("{res:#?}"); if !res.status().is_success() { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -8723,7 +8605,7 @@ fn atlas_stress_integration_test() { get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); account_before_nonces[j] = account_before.nonce; - let fqn = format!("janedoe{}.passport", j); + let fqn = format!("janedoe{j}.passport"); let fqn_bytes = fqn.as_bytes().to_vec(); let salt = format!("{:04x}", j); let salt_bytes = salt.as_bytes().to_vec(); @@ -8746,7 +8628,7 @@ fn atlas_stress_integration_test() { ], ); - let path = format!("{}/v2/transactions", &http_origin); + let path = format!("{http_origin}/v2/transactions"); let res = client .post(&path) .header("Content-Type", "application/octet-stream") @@ -8755,9 +8637,8 @@ fn atlas_stress_integration_test() { .unwrap(); eprintln!( - "sent preorder for {}:\n{:#?}", - &to_addr(&users[batches * batch_size + j]), - res + "sent preorder for {}:\n{res:#?}", + &to_addr(&users[batches * batch_size + j]) ); if !res.status().is_success() { panic!(""); @@ -8784,10 +8665,10 @@ fn atlas_stress_integration_test() { get_account(&http_origin, &to_addr(&users[batches * batch_size + j])); account_before_nonces[j] = account_before.nonce; - let name = format!("janedoe{}", j); - let salt = format!("{:04x}", j); + let name = format!("janedoe{j}"); + let salt = format!("{j:04x}"); - let zonefile_hex = format!("facade01{:04x}", j); + let zonefile_hex = format!("facade01{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); all_zonefiles.push(zonefile_hex.clone()); @@ -8816,14 +8697,14 
@@ fn atlas_stress_integration_test() {
            serde_json::to_vec(&json!(content)).unwrap()
        };
-        let path = format!("{}/v2/transactions", &http_origin);
+        let path = format!("{http_origin}/v2/transactions");
        let res = client
            .post(&path)
            .header("Content-Type", "application/json")
            .body(body)
            .send()
            .unwrap();
-        eprintln!("{:#?}", res);
+        eprintln!("{res:#?}");
        if !res.status().is_success() {
            eprintln!("{}", res.text().unwrap());
            panic!("");
@@ -8850,8 +8731,8 @@ fn atlas_stress_integration_test() {
            get_account(&http_origin, &to_addr(&users[batches * batch_size + j]));
        account_before_nonces[j] = account_before.nonce;
-        let name = format!("janedoe{}", j);
-        let zonefile_hex = format!("facade02{:04x}", j);
+        let name = format!("janedoe{j}");
+        let zonefile_hex = format!("facade02{j:04x}");
        let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap());
        all_zonefiles.push(zonefile_hex.clone());
@@ -8879,14 +8760,14 @@ fn atlas_stress_integration_test() {
            serde_json::to_vec(&json!(content)).unwrap()
        };
-        let path = format!("{}/v2/transactions", &http_origin);
+        let path = format!("{http_origin}/v2/transactions");
        let res = client
            .post(&path)
            .header("Content-Type", "application/json")
            .body(body)
            .send()
            .unwrap();
-        eprintln!("{:#?}", res);
+        eprintln!("{res:#?}");
        if !res.status().is_success() {
            eprintln!("{}", res.text().unwrap());
            panic!("");
@@ -8913,8 +8794,8 @@ fn atlas_stress_integration_test() {
            get_account(&http_origin, &to_addr(&users[batches * batch_size + j]));
        account_before_nonces[j] = account_before.nonce;
-        let name = format!("janedoe{}", j);
-        let zonefile_hex = format!("facade03{:04x}", j);
+        let name = format!("janedoe{j}");
+        let zonefile_hex = format!("facade03{j:04x}");
        let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap());
        all_zonefiles.push(zonefile_hex.clone());
@@ -8945,14 +8826,14 @@ fn atlas_stress_integration_test() {
            serde_json::to_vec(&json!(content)).unwrap()
        };
-        let path = format!("{}/v2/transactions", &http_origin);
+        let path = format!("{http_origin}/v2/transactions");
        let res = client
            .post(&path)
            .header("Content-Type", "application/json")
            .body(body)
            .send()
            .unwrap();
-        eprintln!("{:#?}", res);
+        eprintln!("{res:#?}");
        if !res.status().is_success() {
            eprintln!("{}", res.text().unwrap());
            panic!("");
@@ -8984,8 +8865,8 @@ fn atlas_stress_integration_test() {
            &[ibh],
        )
        .unwrap();
-        if indexes.len() > 0 {
-            attachment_indexes.insert(ibh.clone(), indexes.clone());
+        if !indexes.is_empty() {
+            attachment_indexes.insert(*ibh, indexes.clone());
        }
        for index in indexes.iter() {
@@ -8995,14 +8876,14 @@ fn atlas_stress_integration_test() {
                params![ibh, u64_to_sql(*index).unwrap()],
                "content_hash")
            .unwrap();
-            if hashes.len() > 0 {
+            if !hashes.is_empty() {
                assert_eq!(hashes.len(), 1);
-                attachment_hashes.insert((ibh.clone(), *index), hashes.pop());
+                attachment_hashes.insert((*ibh, *index), hashes.pop());
            }
        }
    }
}
-    eprintln!("attachment_indexes = {:?}", &attachment_indexes);
+    eprintln!("attachment_indexes = {attachment_indexes:?}");
    let max_request_time_ms = 100;
@@ -9017,12 +8898,10 @@ fn atlas_stress_integration_test() {
                ..cmp::min((i + 1) * MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, l)]
                .to_vec();
            let path = format!(
-                "{}/v2/attachments/inv?index_block_hash={}&pages_indexes={}",
-                &http_origin,
-                ibh,
+                "{http_origin}/v2/attachments/inv?index_block_hash={ibh}&pages_indexes={}",
                attachments_batch
                    .iter()
-                    .map(|a| format!("{}", &a))
+                    .map(|a| format!("{a}"))
                    .collect::<Vec<String>>()
                    .join(",")
            );
@@ -9034,40 +8913,34 @@ fn
atlas_stress_integration_test() { if res.status().is_success() { let attachment_inv_response: GetAttachmentsInvResponse = res.json().unwrap(); - eprintln!( - "attachment inv response for {}: {:?}", - &path, &attachment_inv_response - ); + eprintln!("attachment inv response for {path}: {attachment_inv_response:?}"); } else { - eprintln!("Bad response for `{}`: `{:?}`", &path, res.text().unwrap()); + eprintln!("Bad response for `{path}`: `{:?}`", res.text().unwrap()); panic!(); } } let ts_end = get_epoch_time_ms(); let total_time = ts_end.saturating_sub(ts_begin); - eprintln!("Requested {} {} times in {}ms", &path, attempts, total_time); + eprintln!("Requested {path} {attempts} times in {total_time}ms"); // requests should take no more than max_request_time_ms assert!( total_time < attempts * max_request_time_ms, - "Atlas inventory request is too slow: {} >= {} * {}", - total_time, - attempts, - max_request_time_ms + "Atlas inventory request is too slow: {total_time} >= {attempts} * {max_request_time_ms}" ); } - for i in 0..l { - if attachments[i] == 0 { + for attachment in attachments.iter().take(l) { + if *attachment == 0 { continue; } let content_hash = attachment_hashes - .get(&(*ibh, attachments[i])) + .get(&(*ibh, *attachment)) .cloned() .unwrap() .unwrap(); - let path = format!("{}/v2/attachments/{}", &http_origin, &content_hash); + let path = format!("{http_origin}/v2/attachments/{content_hash}"); let attempts = 10; let ts_begin = get_epoch_time_ms(); @@ -9076,26 +8949,20 @@ fn atlas_stress_integration_test() { if res.status().is_success() { let attachment_response: GetAttachmentResponse = res.json().unwrap(); - eprintln!( - "attachment response for {}: {:?}", - &path, &attachment_response - ); + eprintln!("attachment response for {path}: {attachment_response:?}"); } else { - eprintln!("Bad response for `{}`: `{:?}`", &path, res.text().unwrap()); + eprintln!("Bad response for `{path}`: `{:?}`", res.text().unwrap()); panic!(); } } let ts_end = get_epoch_time_ms(); let total_time = ts_end.saturating_sub(ts_begin); - eprintln!("Requested {} {} times in {}ms", &path, attempts, total_time); + eprintln!("Requested {path} {attempts} times in {total_time}ms"); // requests should take no more than max_request_time_ms assert!( total_time < attempts * max_request_time_ms, - "Atlas chunk request is too slow: {} >= {} * {}", - total_time, - attempts, - max_request_time_ms + "Atlas chunk request is too slow: {total_time} >= {attempts} * {max_request_time_ms}" ); } } @@ -9129,8 +8996,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value (unwrap! (increment) (err u1)) (unwrap! 
(increment) (err u1))
            (ok (var-get counter))))
-    "#
-    .to_string();
+    "#;
    let spender_sk = StacksPrivateKey::new();
    let spender_addr = to_addr(&spender_sk);
@@ -9144,7 +9010,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value
    conf.estimation.fee_rate_window_size = window_size;
    conf.initial_balances.push(InitialBalance {
-        address: spender_addr.clone().into(),
+        address: spender_addr.into(),
        amount: 10000000000,
    });
    test_observer::spawn();
@@ -9181,7 +9047,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value
            110000,
            conf.burnchain.chain_id,
            "increment-contract",
-            &max_contract_src,
+            max_contract_src,
        ),
    );
    run_until_burnchain_height(&mut btc_regtest_controller, &blocks_processed, 212, &conf);
@@ -9198,7 +9064,7 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value
                i,          // nonce
                i * 100000, // payment
                conf.burnchain.chain_id,
-                &spender_addr.into(),
+                &spender_addr,
                "increment-contract",
                "increment-many",
                &[],
@@ -9213,12 +9079,12 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value
    {
        // Read from the fee estimation endpoint.
-        let path = format!("{}/v2/fees/transaction", &http_origin);
+        let path = format!("{http_origin}/v2/fees/transaction");
        let tx_payload = TransactionPayload::ContractCall(TransactionContractCall {
-            address: spender_addr.clone().into(),
-            contract_name: ContractName::try_from("increment-contract").unwrap(),
-            function_name: ClarityName::try_from("increment-many").unwrap(),
+            address: spender_addr,
+            contract_name: ContractName::from("increment-contract"),
+            function_name: ClarityName::from("increment-many"),
            function_args: vec![],
        });
@@ -9255,8 +9121,8 @@ fn fuzzed_median_fee_rate_estimation_test(window_size: u64, expected_final_value
        let last_cost = response_estimated_costs[i - 1];
        assert_eq!(curr_cost, last_cost);
-        let curr_rate = response_top_fee_rates[i] as f64;
-        let last_rate = response_top_fee_rates[i - 1] as f64;
+        let curr_rate = response_top_fee_rates[i];
+        let last_rate = response_top_fee_rates[i - 1];
        assert!(curr_rate >= last_rate);
    }
@@ -9438,7 +9304,7 @@ fn use_latest_tip_integration_test() {
    let client = reqwest::blocking::Client::new();
    // Post the microblock
-    let path = format!("{}/v2/microblocks", &http_origin);
+    let path = format!("{http_origin}/v2/microblocks");
    let res: String = client
        .post(&path)
        .header("Content-Type", "application/octet-stream")
@@ -9452,7 +9318,7 @@ fn use_latest_tip_integration_test() {
    // Wait for the microblock to be accepted
    sleep_ms(5_000);
-    let path = format!("{}/v2/info", &http_origin);
+    let path = format!("{http_origin}/v2/info");
    let mut iter_count = 0;
    loop {
        let tip_info = client
@@ -9594,26 +9460,26 @@ fn test_flash_block_skip_tenure() {
    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
    // fault injection: force tenures to take too long
-    std::env::set_var("STX_TEST_SLOW_TENURE".to_string(), "11000".to_string());
+    std::env::set_var("STX_TEST_SLOW_TENURE", "11000");
    for i in 0..10 {
        // build one bitcoin block every 10 seconds
-        eprintln!("Build bitcoin block +{}", i);
+        eprintln!("Build bitcoin block +{i}");
        btc_regtest_controller.build_next_block(1);
        sleep_ms(10000);
    }
    // at least one tenure was skipped
    let num_skipped = missed_tenures.load(Ordering::SeqCst);
-    eprintln!("Skipped {} tenures", &num_skipped);
+    eprintln!("Skipped {num_skipped} tenures");
    assert!(num_skipped > 1);
    // let's query the miner's account nonce:
-    eprintln!("Miner account: {}", miner_account);
+    eprintln!("Miner account: {miner_account}");
    let account = get_account(&http_origin, &miner_account);
-    eprintln!("account = {:?}", &account);
+    eprintln!("account = {account:?}");
    assert_eq!(account.balance, 0);
    assert_eq!(account.nonce, 2);
@@ -9696,20 +9562,20 @@ fn test_problematic_txs_are_not_stored() {
    let (mut conf, _) = neon_integration_test_conf();
    conf.initial_balances.push(InitialBalance {
-        address: spender_addr_1.clone(),
+        address: spender_addr_1,
        amount: 1_000_000_000_000,
    });
    conf.initial_balances.push(InitialBalance {
-        address: spender_addr_2.clone(),
+        address: spender_addr_2,
        amount: 1_000_000_000_000,
    });
    conf.initial_balances.push(InitialBalance {
-        address: spender_addr_3.clone(),
+        address: spender_addr_3,
        amount: 1_000_000_000_000,
    });
    // force mainnet limits in 2.05 for this test
-    conf.burnchain.epochs = Some(vec![
+    conf.burnchain.epochs = Some(EpochList::new(&[
        StacksEpoch {
            epoch_id: StacksEpochId::Epoch20,
            start_height: 0,
@@ -9731,7 +9597,7 @@ fn test_problematic_txs_are_not_stored() {
            block_limit: BLOCK_LIMIT_MAINNET_21.clone(),
            network_epoch: PEER_VERSION_EPOCH_2_1,
        },
-    ]);
+    ]));
    conf.burnchain.pox_2_activation = Some(10_003);
    // take effect immediately
@@ -9754,7 +9620,7 @@ fn test_problematic_txs_are_not_stored() {
    let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) - 1;
    let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize);
    let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize);
-    let tx_edge_body = format!("{}u1 {}", tx_edge_body_start, tx_edge_body_end);
+    let tx_edge_body = format!("{tx_edge_body_start}u1 {tx_edge_body_end}");
    let tx_edge = make_contract_publish(
        &spender_sk_1,
@@ -9772,7 +9638,7 @@ fn test_problematic_txs_are_not_stored() {
    let exceeds_repeat_factor = edge_repeat_factor + 1;
    let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize);
    let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize);
-    let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end);
+    let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}");
    let tx_exceeds = make_contract_publish(
        &spender_sk_2,
@@ -9790,7 +9656,7 @@ fn test_problematic_txs_are_not_stored() {
    let high_repeat_factor = 128 * 1024;
    let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize);
    let tx_high_body_end = "} ".repeat(high_repeat_factor as usize);
-    let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end);
+    let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}");
    let tx_high = make_contract_publish(
        &spender_sk_3,
@@ -9840,25 +9706,24 @@ fn test_problematic_txs_are_not_stored() {
fn find_new_files(dirp: &str, prev_files: &HashSet<String>) -> (Vec<String>, HashSet<String>) {
    let dirpp = Path::new(dirp);
-    debug!("readdir {}", dirp);
+    debug!("readdir {dirp}");
    let cur_files = fs::read_dir(dirp).unwrap();
    let mut new_files = vec![];
    let mut cur_files_set = HashSet::new();
    for cur_file in cur_files.into_iter() {
        let cur_file = cur_file.unwrap();
        let cur_file_fullpath = dirpp.join(cur_file.path()).to_str().unwrap().to_string();
-        test_debug!("file in {}: {}", dirp, &cur_file_fullpath);
+        test_debug!("file in {dirp}: {cur_file_fullpath}");
        cur_files_set.insert(cur_file_fullpath.clone());
        if prev_files.contains(&cur_file_fullpath) {
-            test_debug!("already contains {}", &cur_file_fullpath);
+            test_debug!("already contains {cur_file_fullpath}");
            continue;
        }
-        test_debug!("new file {}", &cur_file_fullpath);
+
test_debug!("new file {cur_file_fullpath}"); new_files.push(cur_file_fullpath); } debug!( - "Checked {} for new files; found {} (all: {})", - dirp, + "Checked {dirp} for new files; found {} (all: {})", new_files.len(), cur_files_set.len() ); @@ -9894,8 +9759,7 @@ fn spawn_follower_node( conf.initial_balances = initial_conf.initial_balances.clone(); conf.burnchain.epochs = initial_conf.burnchain.epochs.clone(); - conf.burnchain.ast_precheck_size_height = - initial_conf.burnchain.ast_precheck_size_height.clone(); + conf.burnchain.ast_precheck_size_height = initial_conf.burnchain.ast_precheck_size_height; conf.connection_options.inv_sync_interval = 3; @@ -9923,12 +9787,12 @@ fn test_problematic_blocks_are_not_mined() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_mined"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -9956,7 +9820,7 @@ fn test_problematic_blocks_are_not_mined() { }); // force mainnet limits in 2.05 for this test - conf.burnchain.epochs = Some(vec![ + conf.burnchain.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -9978,7 +9842,7 @@ fn test_problematic_blocks_are_not_mined() { block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]); + ])); conf.burnchain.pox_2_activation = Some(10_003); // AST precheck becomes default at burn height @@ -10000,7 +9864,7 @@ fn test_problematic_blocks_are_not_mined() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish( &spender_sk_2, @@ -10018,7 +9882,7 @@ fn test_problematic_blocks_are_not_mined() { let high_repeat_factor = 3200; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish( &spender_sk_3, @@ -10054,20 +9918,11 @@ fn test_problematic_blocks_are_not_mined() { // Third block will be the first mined Stacks block. 
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -10116,7 +9971,7 @@ fn test_problematic_blocks_are_not_mined() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -10125,31 +9980,25 @@ fn test_problematic_blocks_are_not_mined() { assert_eq!(cur_ast_rules, ASTRules::Typical); // add another bad tx to the mempool - debug!("Submit problematic tx_high transaction {}", &tx_high_txid); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_high transaction {tx_high_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); btc_regtest_controller.build_next_block(1); // wait for runloop to advance wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(new_tip.block_height > tip.block_height) }) .expect("Failed waiting for blocks to be processed"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10162,7 +10011,7 @@ fn test_problematic_blocks_are_not_mined() { let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some blocks, and log problematic blocks for _i in 0..6 { @@ -10185,10 +10034,8 @@ fn test_problematic_blocks_are_not_mined() { // recently-submitted problematic transactions are not in the mempool // (but old ones that were already mined, and thus never considered, could still be present) - for txid in &[&tx_high_txid] { - test_debug!("Problematic tx {} should be dropped", txid); - assert!(get_unconfirmed_tx(&http_origin, txid).is_none()); - } + test_debug!("Problematic tx {tx_high_txid} should be dropped"); + 
assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); // no block contained the tx_high bad transaction, ever let blocks = test_observer::get_blocks(); @@ -10233,8 +10080,7 @@ fn test_problematic_blocks_are_not_mined() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -10278,12 +10124,12 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_blocks_are_not_relayed_or_stored"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -10311,7 +10157,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { }); // force mainnet limits in 2.05 for this test - conf.burnchain.epochs = Some(vec![ + conf.burnchain.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -10333,7 +10179,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]); + ])); conf.burnchain.pox_2_activation = Some(10_003); // AST precheck becomes default at burn height @@ -10355,7 +10201,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish( &spender_sk_2, @@ -10372,7 +10218,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let high_repeat_factor = 70; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish( &spender_sk_3, @@ -10408,20 +10254,11 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // Third block will be the first mined Stacks block. 
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -10470,7 +10307,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -10484,14 +10321,14 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { loop { sleep_ms(1_000); let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); if new_tip.block_height > tip.block_height { break; } } let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10510,7 +10347,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { } let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10521,23 +10358,17 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // add another bad tx to the mempool. // because the miner is now non-conformant, it should mine this tx. 
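Several hunks here replace hand-rolled `loop { sleep_ms(1_000); ... }` polling with the suite's `wait_for` helper, which retries a closure until it reports success or a timeout elapses. Judging from the call sites (`wait_for(30, || { ... Ok(cond) }).expect(...)`), the helper has roughly the shape sketched below; the poll interval and error message are assumptions:

    use std::time::{Duration, Instant};

    // assumed shape of the `wait_for` helper used by these tests
    fn wait_for<F>(timeout_secs: u64, mut check: F) -> Result<(), String>
    where
        F: FnMut() -> Result<bool, String>,
    {
        let start = Instant::now();
        while start.elapsed() < Duration::from_secs(timeout_secs) {
            if check()? {
                return Ok(());
            }
            std::thread::sleep(Duration::from_millis(100));
        }
        Err("timed out waiting for condition".into())
    }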
- debug!("Submit problematic tx_high transaction {}", &tx_high_txid); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_high transaction {tx_high_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some blocks, and log problematic blocks for _i in 0..6 { @@ -10549,7 +10380,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10616,10 +10447,8 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { break; } eprintln!( - "\nFollower is at burn block {} stacks block {} (bad_block is {})\n", - follower_tip_info.burn_block_height, - follower_tip_info.stacks_tip_height, - bad_block_height + "\nFollower is at burn block {} stacks block {} (bad_block is {bad_block_height})\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); sleep_ms(1000); } @@ -10627,8 +10456,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -10648,8 +10476,8 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let follower_tip_info = get_chain_info(&follower_conf); eprintln!( - "\nFollower is at burn block {} stacks block {} (bad block is {})\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, bad_block_height + "\nFollower is at burn block {} stacks block {} (bad block is {bad_block_height})\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); // follower rejects the bad block @@ -10669,12 +10497,12 @@ fn test_problematic_microblocks_are_not_mined() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_microblocks_are_not_mined"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -10702,7 +10530,7 @@ fn test_problematic_microblocks_are_not_mined() { }); // force mainnet limits in 2.05 for this test - conf.burnchain.epochs = 
Some(vec![ + conf.burnchain.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -10724,7 +10552,7 @@ fn test_problematic_microblocks_are_not_mined() { block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]); + ])); conf.burnchain.pox_2_activation = Some(10_003); // AST precheck becomes default at burn height @@ -10752,7 +10580,7 @@ fn test_problematic_microblocks_are_not_mined() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish_microblock_only( &spender_sk_2, @@ -10767,11 +10595,10 @@ fn test_problematic_microblocks_are_not_mined() { .txid(); // something stupidly high over the expression depth - let high_repeat_factor = - (AST_CALL_STACK_DEPTH_BUFFER as u64) + (MAX_CALL_STACK_DEPTH as u64) + 1; + let high_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish_microblock_only( &spender_sk_3, @@ -10807,24 +10634,12 @@ fn test_problematic_microblocks_are_not_mined() { // Third block will be the first mined Stacks block. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - info!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + info!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); - info!( - "Submitted problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); + info!("Submitted problematic tx_exceeds transaction {tx_exceeds_txid}"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -10876,7 +10691,7 @@ fn test_problematic_microblocks_are_not_mined() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -10885,39 +10700,27 @@ fn test_problematic_microblocks_are_not_mined() { assert_eq!(cur_ast_rules, ASTRules::Typical); // add another bad tx to the mempool - info!("Submit problematic tx_high transaction {}", &tx_high_txid); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + info!("Submit problematic tx_high transaction {tx_high_txid}"); + 
std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); - info!( - "Submitted problematic tx_high transaction {}", - &tx_high_txid - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); + info!("Submitted problematic tx_high transaction {tx_high_txid}"); btc_regtest_controller.build_next_block(1); - info!( - "Mined block after submitting problematic tx_high transaction {}", - &tx_high_txid - ); + info!("Mined block after submitting problematic tx_high transaction {tx_high_txid}"); // wait for runloop to advance wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(new_tip.block_height > tip.block_height) }) .expect("Failed waiting for runloop to advance"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -10930,7 +10733,7 @@ fn test_problematic_microblocks_are_not_mined() { let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some microblocks, and log problematic microblocks for _i in 0..6 { @@ -10956,10 +10759,8 @@ fn test_problematic_microblocks_are_not_mined() { // recently-submitted problematic transactions are not in the mempool // (but old ones that were already mined, and thus never considered, could still be present) - for txid in &[&tx_high_txid] { - test_debug!("Problematic tx {} should be dropped", txid); - assert!(get_unconfirmed_tx(&http_origin, txid).is_none()); - } + test_debug!("Problematic tx {tx_high_txid} should be dropped"); + assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); // no microblock contained the tx_high bad transaction, ever let microblocks = test_observer::get_microblocks(); @@ -11004,8 +10805,7 @@ fn test_problematic_microblocks_are_not_mined() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -11049,12 +10849,12 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { } let bad_blocks_dir = "/tmp/bad-blocks-test_problematic_microblocks_are_not_relayed_or_stored"; - if fs::metadata(&bad_blocks_dir).is_ok() { - fs::remove_dir_all(&bad_blocks_dir).unwrap(); + if fs::metadata(bad_blocks_dir).is_ok() { + fs::remove_dir_all(bad_blocks_dir).unwrap(); } - fs::create_dir_all(&bad_blocks_dir).unwrap(); + fs::create_dir_all(bad_blocks_dir).unwrap(); - std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir.to_string()); + std::env::set_var("STACKS_BAD_BLOCKS_DIR", bad_blocks_dir); let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); let spender_sk_2 = 
StacksPrivateKey::from_hex(SK_2).unwrap(); @@ -11082,7 +10882,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { }); // force mainnet limits in 2.05 for this test - conf.burnchain.epochs = Some(vec![ + conf.burnchain.epochs = Some(EpochList::new(&[ StacksEpoch { epoch_id: StacksEpochId::Epoch20, start_height: 0, @@ -11104,7 +10904,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_1, }, - ]); + ])); conf.burnchain.pox_2_activation = Some(10_003); // AST precheck becomes default at burn height @@ -11134,7 +10934,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let exceeds_repeat_factor = 32; let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); - let tx_exceeds_body = format!("{}u1 {}", tx_exceeds_body_start, tx_exceeds_body_end); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); let tx_exceeds = make_contract_publish_microblock_only( &spender_sk_2, @@ -11149,11 +10949,10 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { .txid(); // greatly exceeds AST depth, but is still mineable without a stack overflow - let high_repeat_factor = - (AST_CALL_STACK_DEPTH_BUFFER as u64) + (MAX_CALL_STACK_DEPTH as u64) + 1; + let high_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1; let tx_high_body_start = "{ a : ".repeat(high_repeat_factor as usize); let tx_high_body_end = "} ".repeat(high_repeat_factor as usize); - let tx_high_body = format!("{}u1 {}", tx_high_body_start, tx_high_body_end); + let tx_high_body = format!("{tx_high_body_start}u1 {tx_high_body_end}"); let tx_high = make_contract_publish_microblock_only( &spender_sk_3, @@ -11189,20 +10988,11 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // Third block will be the first mined Stacks block. 
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - debug!( - "Submit problematic tx_exceeds transaction {}", - &tx_exceeds_txid - ); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + debug!("Submit problematic tx_exceeds transaction {tx_exceeds_txid}"); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_exceeds); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); @@ -11254,7 +11044,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let (tip, cur_ast_rules) = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); (tip, cur_ast_rules) @@ -11267,14 +11057,14 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // wait for runloop to advance wait_for(30, || { let sortdb = btc_regtest_controller.sortdb_mut(); - let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let new_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(new_tip.block_height > tip.block_height) }) .expect("Failed waiting for runloop to advance"); let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -11293,7 +11083,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { } let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -11304,24 +11094,18 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // add another bad tx to the mempool. // because the miner is now non-conformant, it should mine this tx. 
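Alongside the format-string migration, these hunks apply the standard clippy simplifications: `!x.is_empty()` in place of `x.len() > 0` (or `len() >= 1`), and `iter().all(...)` in place of a boolean `fold`, as in the hunks that follow. Both rewrites are behavior-preserving; the sample values below are ours:

    let all_new_files = vec!["problematic-block.json".to_string()]; // sample data
    // before: assert!(all_new_files.len() >= 1);
    assert!(!all_new_files.is_empty());

    let all_mined_vec = [true, true, true]; // sample data
    // before: all_mined_vec.iter().fold(true, |acc, elem| acc && *elem)
    assert!(all_mined_vec.iter().all(|elem| *elem));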
- debug!("Submit problematic tx_high transaction {}", &tx_high_txid); + debug!("Submit problematic tx_high transaction {tx_high_txid}"); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "1".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "1"); submit_tx(&http_origin, &tx_high); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_some()); - std::env::set_var( - "STACKS_DISABLE_TX_PROBLEMATIC_CHECK".to_string(), - "0".to_string(), - ); + std::env::set_var("STACKS_DISABLE_TX_PROBLEMATIC_CHECK", "0"); let (_, mut cur_files) = find_new_files(bad_blocks_dir, &HashSet::new()); let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - eprintln!("old_tip_info = {:?}", &old_tip_info); + eprintln!("old_tip_info = {old_tip_info:?}"); // mine some blocks, and log problematic microblocks for _i in 0..6 { @@ -11333,7 +11117,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let cur_ast_rules = { let sortdb = btc_regtest_controller.sortdb_mut(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); cur_ast_rules @@ -11356,7 +11140,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // at least one was problematic. // the miner might make multiple microblocks (only some of which are confirmed), so also check // the event observer to see that we actually picked up tx_high - assert!(all_new_files.len() >= 1); + assert!(!all_new_files.is_empty()); // tx_high got mined by the miner let microblocks = test_observer::get_microblocks(); @@ -11381,8 +11165,8 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { .split("0x") .collect(); let bad_block_id_hex = parts[1]; - debug!("bad_block_id_hex = '{}'", &bad_block_id_hex); - Some(StacksBlockId::from_hex(&bad_block_id_hex).unwrap()) + debug!("bad_block_id_hex = '{bad_block_id_hex}'"); + Some(StacksBlockId::from_hex(bad_block_id_hex).unwrap()) }; } } @@ -11420,8 +11204,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // make sure we aren't just slow -- wait for the follower to do a few download passes let num_download_passes = pox_sync_comms.get_download_passes(); eprintln!( - "\nFollower has performed {} download passes; wait for {}\n", - num_download_passes, + "\nFollower has performed {num_download_passes} download passes; wait for {}\n", num_download_passes + 5 ); @@ -11441,8 +11224,8 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let follower_tip_info = get_chain_info(&follower_conf); eprintln!( - "\nFollower is at burn block {} stacks block {} (bad block is {})\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, bad_block_height + "\nFollower is at burn block {} stacks block {} (bad block is {bad_block_height})\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); // follower rejects the bad microblock -- can't append subsequent blocks @@ -11570,9 +11353,8 @@ fn make_runtime_sized_contract(num_index_of: usize, nonce: u64, addr_prefix: &st let full_iters_code = full_iters_code_parts.join("\n "); - let iters_mod_code_parts: Vec = (0..iters_mod) - .map(|cnt| format!("0x{:0>2x}", cnt)) - .collect(); + let iters_mod_code_parts: Vec = + (0..iters_mod).map(|cnt| format!("0x{cnt:0>2x}")).collect(); let iters_mod_code = format!("(list {})", iters_mod_code_parts.join(" 
")); @@ -11599,7 +11381,7 @@ fn make_runtime_sized_contract(num_index_of: usize, nonce: u64, addr_prefix: &st (define-private (crash-me-folder (input (buff 1)) (ctr uint)) (begin ;; full_iters_code - {} + {full_iters_code} (+ u1 ctr) ) ) @@ -11608,20 +11390,17 @@ fn make_runtime_sized_contract(num_index_of: usize, nonce: u64, addr_prefix: &st ;; call index-of (iters_256 * 256) times (fold crash-me-folder BUFF_TO_BYTE u0) ;; call index-of iters_mod times - (fold crash-me-folder {} u0) + (fold crash-me-folder {iters_mod_code} u0) (print name) (ok u0) ) ) (begin - (crash-me \"{}\")) + (crash-me \"large-{nonce}-{addr_prefix}-{num_index_of}\")) ", - full_iters_code, - iters_mod_code, - &format!("large-{}-{}-{}", nonce, &addr_prefix, num_index_of) ); - eprintln!("{}", &code); + eprintln!("{code}"); code } @@ -11636,13 +11415,14 @@ pub fn make_expensive_tx_chain( chain_id: u32, mblock_only: bool, ) -> Vec> { - let addr = to_addr(&privk); + let addr = to_addr(privk); let mut chain = vec![]; + let num_index_of = 256; for nonce in 0..25 { let mut addr_prefix = addr.to_string(); let _ = addr_prefix.split_off(12); - let contract_name = format!("large-{}-{}-{}", nonce, &addr_prefix, 256); - eprintln!("Make tx {}", &contract_name); + let contract_name = format!("large-{nonce}-{addr_prefix}-{num_index_of}"); + eprintln!("Make tx {contract_name}"); let tx = if mblock_only { make_contract_publish_microblock_only( privk, @@ -11650,7 +11430,7 @@ pub fn make_expensive_tx_chain( 1049230 + nonce + fee_plus, chain_id, &contract_name, - &make_runtime_sized_contract(256, nonce, &addr_prefix), + &make_runtime_sized_contract(num_index_of, nonce, &addr_prefix), ) } else { make_contract_publish( @@ -11659,7 +11439,7 @@ pub fn make_expensive_tx_chain( 1049230 + nonce + fee_plus, chain_id, &contract_name, - &make_runtime_sized_contract(256, nonce, &addr_prefix), + &make_runtime_sized_contract(num_index_of, nonce, &addr_prefix), ) }; chain.push(tx); @@ -11673,7 +11453,7 @@ pub fn make_random_tx_chain( chain_id: u32, mblock_only: bool, ) -> Vec> { - let addr = to_addr(&privk); + let addr = to_addr(privk); let mut chain = vec![]; for nonce in 0..25 { @@ -11689,8 +11469,8 @@ pub fn make_random_tx_chain( let mut addr_prefix = addr.to_string(); let _ = addr_prefix.split_off(12); - let contract_name = format!("large-{}-{}-{}", nonce, &addr_prefix, random_iters); - eprintln!("Make tx {}", &contract_name); + let contract_name = format!("large-{nonce}-{addr_prefix}-{random_iters}"); + eprintln!("Make tx {contract_name}"); let tx = if mblock_only { make_contract_publish_microblock_only( privk, @@ -11716,7 +11496,7 @@ pub fn make_random_tx_chain( } fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) -> Vec> { - let addr = to_addr(&privk); + let addr = to_addr(privk); let mut chain = vec![]; for nonce in 0..25 { @@ -11732,8 +11512,8 @@ fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) let mut addr_prefix = addr.to_string(); let _ = addr_prefix.split_off(12); - let contract_name = format!("crct-{}-{}-{}", nonce, &addr_prefix, random_iters); - eprintln!("Make tx {}", &contract_name); + let contract_name = format!("crct-{nonce}-{addr_prefix}-{random_iters}"); + eprintln!("Make tx {contract_name}"); let tx = make_contract_publish_microblock_only( privk, nonce, @@ -11758,10 +11538,7 @@ fn test_competing_miners_build_on_same_chain( return; } - let privks: Vec<_> = (0..100) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..100).map(|_| 
StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() .map(|privk| { @@ -11808,9 +11585,8 @@ fn test_competing_miners_build_on_same_chain( confs[i].node.set_bootstrap_nodes( format!( - "{}@{}", + "{}@{p2p_bind}", &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind ), chain_id, peer_version, @@ -11818,8 +11594,8 @@ fn test_competing_miners_build_on_same_chain( } // use long reward cycles - for i in 0..num_miners { - let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + for conf in &confs { + let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); let reward_cycle_len = 100; let prepare_phase_len = 20; let pox_constants = PoxConstants::new( @@ -11856,10 +11632,10 @@ fn test_competing_miners_build_on_same_chain( btc_regtest_controller.bootstrap_chain(1); // make sure all miners have BTC - for i in 1..num_miners { + for conf in confs.iter().skip(1) { let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); btc_regtest_controller - .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + .set_mining_pubkey(conf.burnchain.local_mining_public_key.clone().unwrap()); btc_regtest_controller.bootstrap_chain(1); btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); } @@ -11879,8 +11655,8 @@ fn test_competing_miners_build_on_same_chain( let http_origin = format!("http://{}", &confs[0].node.rpc_bind); // give the run loops some time to start up! - for i in 0..num_miners { - wait_for_runloop(&blocks_processed[i as usize]); + for bp in &blocks_processed { + wait_for_runloop(bp); } // activate miners @@ -11888,7 +11664,7 @@ fn test_competing_miners_build_on_same_chain( loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 1: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 1: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -11898,23 +11674,19 @@ fn test_competing_miners_build_on_same_chain( next_block_and_wait(&mut btc_regtest_controller, &blocks_processed[0]); } - for i in 1..num_miners { - eprintln!("\n\nBoot miner {}\n\n", i); + for (i, conf) in confs.iter().enumerate().skip(1) { + eprintln!("\n\nBoot miner {i}\n\n"); loop { - let tip_info_opt = get_chain_info_opt(&confs[i]); + let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } - next_block_and_iterate( - &mut btc_regtest_controller, - &blocks_processed[i as usize], - 5_000, - ); + next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } } @@ -11938,7 +11710,7 @@ fn test_competing_miners_build_on_same_chain( let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -11948,7 +11720,7 @@ fn test_competing_miners_build_on_same_chain( // mine quickly -- see if we can induce flash blocks for i in 0..1000 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); } @@ -12023,10 +11795,7 @@ fn microblock_miner_multiple_attempts() { conf.burnchain.max_rbf = 1000000; conf.node.wait_time_for_blocks = 1_000; - let privks: 
Vec<_> = (0..100) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); + let privks: Vec<_> = (0..100).map(|_| StacksPrivateKey::new()).collect(); let balances: Vec<_> = privks .iter() .map(|privk| { @@ -12076,7 +11845,7 @@ fn microblock_miner_multiple_attempts() { // let's query the miner's account nonce: let account = get_account(&http_origin, &miner_account); - eprintln!("Miner account: {:?}", &account); + eprintln!("Miner account: {account:?}"); let all_txs: Vec<_> = privks .iter() @@ -12085,10 +11854,9 @@ fn microblock_miner_multiple_attempts() { .collect(); let _handle = thread::spawn(move || { - for txi in 0..all_txs.len() { - for j in 0..all_txs[txi].len() { - let tx = &all_txs[txi][j]; - eprintln!("\n\nSubmit tx {},{}\n\n", txi, j); + for (i, txi) in all_txs.iter().enumerate() { + for (j, tx) in txi.iter().enumerate() { + eprintln!("\n\nSubmit tx {i},{j}\n\n"); submit_tx(&http_origin, tx); sleep_ms(1_000); } @@ -12119,12 +11887,13 @@ fn min_txs() { test_observer::spawn(); test_observer::register_any(&mut conf); + let path = "/tmp/activate_vrf_key.min_txs.json"; conf.miner.min_tx_count = 4; conf.miner.first_attempt_time_ms = 0; - conf.miner.activated_vrf_key_path = Some("/tmp/activate_vrf_key.min_txs.json".to_string()); + conf.miner.activated_vrf_key_path = Some(path.to_string()); - if fs::metadata("/tmp/activate_vrf_key.min_txs.json").is_ok() { - fs::remove_file("/tmp/activate_vrf_key.min_txs.json").unwrap(); + if fs::metadata(path).is_ok() { + fs::remove_file(path).unwrap(); } let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -12176,18 +11945,18 @@ fn min_txs() { let _sort_height = channel.get_sortitions_processed(); for i in 0..2 { - let code = format!("(print \"hello world {}\")", i); + let code = format!("(print \"hello world {i}\")"); let publish = make_contract_publish( &spender_sk, i as u64, 1000, conf.burnchain.chain_id, - &format!("test-publish-{}", &i), + &format!("test-publish-{i}"), &code, ); submit_tx(&http_origin, &publish); - debug!("Try to build too-small a block {}", &i); + debug!("Try to build too-small a block {i}"); next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 15); } @@ -12195,12 +11964,12 @@ fn min_txs() { for block in blocks { let transactions = block.get("transactions").unwrap().as_array().unwrap(); if transactions.len() > 1 { - debug!("Got block: {:?}", &block); + debug!("Got block: {block:?}"); assert!(transactions.len() >= 4); } } - let saved_vrf_key = RelayerThread::load_saved_vrf_key("/tmp/activate_vrf_key.min_txs.json"); + let saved_vrf_key = RelayerThread::load_saved_vrf_key(path); assert!(saved_vrf_key.is_some()); test_observer::clear(); @@ -12222,13 +11991,14 @@ fn filter_txs_by_type() { test_observer::spawn(); test_observer::register_any(&mut conf); + let path = "/tmp/activate_vrf_key.filter_txs.json"; conf.miner.min_tx_count = 4; conf.miner.first_attempt_time_ms = 0; - conf.miner.activated_vrf_key_path = Some("/tmp/activate_vrf_key.filter_txs.json".to_string()); + conf.miner.activated_vrf_key_path = Some(path.to_string()); conf.miner.txs_to_consider = [MemPoolWalkTxTypes::TokenTransfer].into_iter().collect(); - if fs::metadata("/tmp/activate_vrf_key.filter_txs.json").is_ok() { - fs::remove_file("/tmp/activate_vrf_key.filter_txs.json").unwrap(); + if fs::metadata(path).is_ok() { + fs::remove_file(path).unwrap(); } let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -12280,13 +12050,13 @@ fn filter_txs_by_type() { let _sort_height = 
channel.get_sortitions_processed(); let mut sent_txids = HashSet::new(); for i in 0..2 { - let code = format!("(print \"hello world {}\")", i); + let code = format!("(print \"hello world {i}\")"); let publish = make_contract_publish( &spender_sk, i as u64, 1000, conf.burnchain.chain_id, - &format!("test-publish-{}", &i), + &format!("test-publish-{i}"), &code, ); let parsed = StacksTransaction::consensus_deserialize(&mut &publish[..]).unwrap(); @@ -12298,7 +12068,7 @@ fn filter_txs_by_type() { let blocks = test_observer::get_blocks(); for block in blocks { - info!("block: {:?}", &block); + info!("block: {block:?}"); let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); @@ -12313,7 +12083,7 @@ fn filter_txs_by_type() { } } - let saved_vrf_key = RelayerThread::load_saved_vrf_key("/tmp/activate_vrf_key.filter_txs.json"); + let saved_vrf_key = RelayerThread::load_saved_vrf_key(path); assert!(saved_vrf_key.is_some()); test_observer::clear(); @@ -12391,13 +12161,13 @@ fn filter_txs_by_origin() { let _sort_height = channel.get_sortitions_processed(); let mut sent_txids = HashSet::new(); for i in 0..2 { - let code = format!("(print \"hello world {}\")", i); + let code = format!("(print \"hello world {i}\")"); let publish = make_contract_publish( &spender_sk, i as u64, 1000, conf.burnchain.chain_id, - &format!("test-publish-{}", &i), + &format!("test-publish-{i}"), &code, ); let parsed = StacksTransaction::consensus_deserialize(&mut &publish[..]).unwrap(); @@ -12409,7 +12179,7 @@ fn filter_txs_by_origin() { let blocks = test_observer::get_blocks(); for block in blocks { - info!("block: {:?}", &block); + info!("block: {block:?}"); let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); @@ -12479,12 +12249,12 @@ fn bitcoin_reorg_flap() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); while sort_height < 210 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } // stop bitcoind and copy its DB to simulate a chain flap @@ -12496,7 +12266,7 @@ fn bitcoin_reorg_flap() { new_conf.node.working_dir = format!("{}.new", &conf.node.working_dir); fs::create_dir_all(&new_conf.node.working_dir).unwrap(); - copy_dir_all(&btcd_dir, &new_conf.get_burnchain_path_str()).unwrap(); + copy_dir_all(&btcd_dir, new_conf.get_burnchain_path_str()).unwrap(); // resume let mut btcd_controller = BitcoinCoreController::new(conf.clone()); @@ -12681,8 +12451,7 @@ fn bitcoin_reorg_flap_with_follower() { let mut miner_sort_height = miner_channel.get_sortitions_processed(); let mut follower_sort_height = follower_channel.get_sortitions_processed(); eprintln!( - "Miner sort height: {}, follower sort height: {}", - miner_sort_height, follower_sort_height + "Miner sort height: {miner_sort_height}, follower sort height: {follower_sort_height}" ); while miner_sort_height < 210 && follower_sort_height < 210 { @@ -12695,8 +12464,7 @@ fn bitcoin_reorg_flap_with_follower() { miner_sort_height = miner_channel.get_sortitions_processed(); follower_sort_height = miner_channel.get_sortitions_processed(); eprintln!( - "Miner sort 
height: {}, follower sort height: {}", - miner_sort_height, follower_sort_height + "Miner sort height: {miner_sort_height}, follower sort height: {follower_sort_height}" ); } @@ -12709,7 +12477,7 @@ fn bitcoin_reorg_flap_with_follower() { new_conf.node.working_dir = format!("{}.new", &conf.node.working_dir); fs::create_dir_all(&new_conf.node.working_dir).unwrap(); - copy_dir_all(&btcd_dir, &new_conf.get_burnchain_path_str()).unwrap(); + copy_dir_all(&btcd_dir, new_conf.get_burnchain_path_str()).unwrap(); // resume let mut btcd_controller = BitcoinCoreController::new(conf.clone()); diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 42b894398d..946a566c13 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -42,7 +42,6 @@ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use stacks::chainstate::stacks::StacksPrivateKey; -use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; @@ -112,7 +111,7 @@ pub struct SignerTest { } impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest> { - fn new(num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>) -> Self { + pub fn new(num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>) -> Self { Self::new_with_config_modifications( num_signers, initial_balances, @@ -123,10 +122,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest (), - G: FnMut(&mut NeonConfig) -> (), - >( + fn new_with_config_modifications( num_signers: usize, initial_balances: Vec<(StacksAddress, u64)>, mut signer_config_modifier: F, @@ -151,8 +147,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest>()); @@ -330,10 +325,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest Result<(), String> { - // Make sure that ALL signers accepted the block proposal + // Make sure that at least 70% of signers accepted the block proposal wait_for(timeout_secs, || { let signatures = test_observer::get_stackerdb_chunks() .into_iter() @@ -597,7 +583,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest>(); - Ok(signatures.len() == expected_signers.len()) + Ok(signatures.len() > expected_signers.len() * 7 / 10) }) } @@ -634,7 +620,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest ()>( +fn setup_stx_btc_node( mut naka_conf: NeonConfig, signer_stacks_private_keys: &[StacksPrivateKey], signer_configs: &[SignerConfig], diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 1744a3b4a8..2486043ccc 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -13,6 +13,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
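Note: most of the mechanical churn in the hunks above is the migration from positional format! arguments to inline format captures (stable since Rust 1.58). The two spellings render identically, but captures only accept bare identifiers, which is why field accesses such as info.stacks_tip_height stay positional elsewhere in this diff. A minimal sketch of the rule:

    fn main() {
        // Inline captures and positional arguments produce the same string:
        let nonce = 7u64;
        let addr_prefix = "ST2CY5";
        let num_index_of = 256usize;
        assert_eq!(
            format!("large-{nonce}-{addr_prefix}-{num_index_of}"),
            format!("large-{}-{}-{}", nonce, addr_prefix, num_index_of)
        );

        // Field accesses cannot be captured inline, so they stay positional:
        struct Info { stacks_tip_height: u64 }
        let info = Info { stacks_tip_height: 42 };
        println!("stacks_tip_height: {}", info.stacks_tip_height);
    }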
+use std::cmp::min; use std::collections::{HashMap, HashSet}; use std::ops::Add; use std::str::FromStr; @@ -21,7 +22,6 @@ use std::time::{Duration, Instant}; use std::{env, thread}; use clarity::vm::types::PrincipalData; -use clarity::vm::StacksEpoch; use libsigner::v0::messages::{ BlockRejection, BlockResponse, MessageSlotID, MinerSlotID, RejectCode, SignerMessage, }; @@ -43,7 +43,7 @@ use stacks::net::api::postblock_proposal::{ValidateRejectCode, TEST_VALIDATE_STA use stacks::net::relay::fault_injection::set_ignore_block; use stacks::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey, StacksPublicKey}; use stacks::types::PublicKey; -use stacks::util::hash::{hex_bytes, MerkleHashFunc}; +use stacks::util::hash::{hex_bytes, Hash160, MerkleHashFunc}; use stacks::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ @@ -66,14 +66,16 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::config::{EventKeyType, EventObserverConfig}; use crate::event_dispatcher::MinedNakamotoBlockEvent; -use crate::nakamoto_node::miner::{TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL}; +use crate::nakamoto_node::miner::{ + TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, +}; use crate::nakamoto_node::sign_coordinator::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; use crate::tests::nakamoto_integrations::{ boot_to_epoch_25, boot_to_epoch_3_reward_set, next_block_and, next_block_and_controller, - setup_epoch_3_reward_set, wait_for, POX_4_DEFAULT_STACKER_BALANCE, - POX_4_DEFAULT_STACKER_STX_AMT, + next_block_and_process_new_stacks_block, setup_epoch_3_reward_set, wait_for, + POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ get_account, get_chain_info, get_chain_info_opt, next_block_and_wait, @@ -101,8 +103,7 @@ impl SignerTest { let lock_period = 12; let epochs = self.running_nodes.conf.burnchain.epochs.clone().unwrap(); - let epoch_25 = - &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch25).unwrap()]; + let epoch_25 = &epochs[StacksEpochId::Epoch25]; let epoch_25_start_height = epoch_25.start_height; // stack enough to activate pox-4 let block_height = self @@ -118,13 +119,13 @@ impl SignerTest { for stacker_sk in self.signer_stacks_private_keys.iter() { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &stacker_sk, + stacker_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, CHAIN_ID_TESTNET, @@ -137,7 +138,7 @@ impl SignerTest { let signer_pk = StacksPublicKey::from_private(stacker_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, self.running_nodes.conf.burnchain.chain_id, @@ -227,7 +228,7 @@ impl SignerTest { } /// Run the test until the epoch 3 boundary - fn boot_to_epoch_3(&mut self) { + pub fn boot_to_epoch_3(&mut self) { boot_to_epoch_3_reward_set( &self.running_nodes.conf, &self.running_nodes.blocks_processed, @@ -246,7 +247,7 @@ impl SignerTest { .get_reward_set_signers(reward_cycle) .expect("Failed to check if reward set is calculated") .map(|reward_set| { - debug!("Signer set: {:?}", reward_set); + debug!("Signer set: {reward_set:?}"); }) 
.is_some())
        })
@@ -304,10 +305,7 @@ impl SignerTest {
         // NOTE: signature.len() does not need to equal signers.len(); the stacks miner can finish the block
         // whenever it has crossed the threshold.
         assert!(signature.len() >= num_signers * 7 / 10);
-        info!(
-            "Verifying signatures against signers for reward cycle {:?}",
-            reward_cycle
-        );
+        info!("Verifying signatures against signers for reward cycle {reward_cycle:?}");
         let signers = self.get_reward_set_signers(reward_cycle);

         // Verify that the signers signed the proposed block
@@ -421,6 +419,27 @@ impl SignerTest {
     }
 }

+fn last_block_contains_tenure_change_tx(cause: TenureChangeCause) -> bool {
+    let blocks = test_observer::get_blocks();
+    let last_block = blocks.last().unwrap();
+    let transactions = last_block["transactions"].as_array().unwrap();
+    let tx = transactions.first().expect("No transactions in block");
+    let raw_tx = tx["raw_tx"].as_str().unwrap();
+    let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap();
+    let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap();
+    match &parsed.payload {
+        TransactionPayload::TenureChange(payload) if payload.cause == cause => {
+            info!("Found tenure change transaction: {parsed:?}");
+            true
+        }
+        _ => false,
+    }
+}
+
+fn verify_last_block_contains_tenure_change_tx(cause: TenureChangeCause) {
+    assert!(last_block_contains_tenure_change_tx(cause));
+}
+
 #[test]
 #[ignore]
 /// Test that a signer can respond to an invalid block proposal
@@ -432,7 +451,8 @@ impl SignerTest {
 /// The stacks node is advanced to epoch 3.0 reward set calculation to ensure the signer set is determined.
 /// An invalid block proposal is forcibly written to the miner's slot to simulate the miner proposing a block.
 /// The signers process the invalid block by first verifying it against the stacks node block proposal endpoint.
-/// The signers then broadcast a rejection of the miner's proposed block back to the respective .signers-XXX-YYY contract.
+/// The signer that submitted the initial block validation request should broadcast a rejection of the
+/// miner's proposed block back to the respective .signers-XXX-YYY contract.
 ///
 /// Test Assertion:
 /// Each signer successfully rejects the invalid block proposal.
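Note: both 70% checks above use truncating integer arithmetic, and the comparison operators differ. Assuming equal signer weights, a worked example with the five signers these tests typically use:

    fn main() {
        // 70% of 5 signers, in integer math:
        let num_signers = 5usize;
        assert_eq!(num_signers * 7 / 10, 3); // 3.5 truncates down to 3

        // signer-side wait (`len > 3`): at least 4 of 5 must accept (80%),
        // the smallest count that clears a strict 70%-of-weight threshold
        assert!(4 > num_signers * 7 / 10);

        // miner-side assert (`len >= 3`): already satisfied by 3 of 5 (60%)
        assert!(3 >= num_signers * 7 / 10);
    }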
@@ -456,6 +476,7 @@ fn block_proposal_rejection() { let proposal_conf = ProposalEvalConfig { first_proposal_burn_block_timing: Duration::from_secs(0), block_proposal_timeout: Duration::from_secs(100), + tenure_last_block_proposal_timeout: Duration::from_secs(30), }; let mut block = NakamotoBlock { header: NakamotoBlockHeader::empty(), @@ -789,7 +810,7 @@ fn reloads_signer_set_in() { let send_fee = 180; let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |_config| {}, |_| {}, None, @@ -807,7 +828,7 @@ fn reloads_signer_set_in() { let naka_conf = &signer_test.running_nodes.conf; let epochs = naka_conf.burnchain.epochs.clone().unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3 = &epochs[StacksEpochId::Epoch30]; let reward_cycle_len = naka_conf.get_burnchain().pox_constants.reward_cycle_length as u64; let prepare_phase_len = naka_conf.get_burnchain().pox_constants.prepare_length as u64; @@ -848,7 +869,7 @@ fn reloads_signer_set_in() { } }; if let Some(ref set) = reward_set { - info!("Signer set: {:?}", set); + info!("Signer set: {set:?}"); } Ok(reward_set.is_some()) }) @@ -912,7 +933,7 @@ fn forked_tenure_testing( let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |config| { // make the duration long enough that the reorg attempt will definitely be accepted config.first_proposal_burn_block_timing = proposal_limit; @@ -920,7 +941,9 @@ fn forked_tenure_testing( // need) TEST_SKIP_BLOCK_BROADCAST.lock().unwrap().replace(true); }, - |_| {}, + |config| { + config.miner.tenure_cost_limit_per_block_percentage = None; + }, None, None, ); @@ -1003,10 +1026,7 @@ fn forked_tenure_testing( signer_test .running_nodes .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - .replace(true); + .set(true); TEST_BROADCAST_STALL.lock().unwrap().replace(false); // Wait for a stacks block to be broadcasted @@ -1019,7 +1039,7 @@ fn forked_tenure_testing( thread::sleep(Duration::from_secs(1)); } - info!("Tenure B broadcasted a block. Wait {post_btc_block_pause:?}, issue the next bitcon block, and un-stall block commits."); + info!("Tenure B broadcasted a block. 
Wait {post_btc_block_pause:?}, issue the next bitcoin block, and un-stall block commits."); thread::sleep(post_btc_block_pause); // the block will be stored, not processed, so load it out of staging @@ -1030,7 +1050,7 @@ fn forked_tenure_testing( .nakamoto_blocks_db() .get_nakamoto_tenure_start_blocks(&tip_sn.consensus_hash) .unwrap() - .get(0) + .first() .cloned() .unwrap(); @@ -1038,14 +1058,14 @@ fn forked_tenure_testing( let tip_b = StacksHeaderInfo { anchored_header: StacksBlockHeaderTypes::Nakamoto(tip_b_block.header.clone()), microblock_tail: None, - stacks_block_height: tip_b_block.header.chain_length.into(), + stacks_block_height: tip_b_block.header.chain_length, index_root: TrieHash([0x00; 32]), // we can't know this yet since the block hasn't been processed - consensus_hash: tip_b_block.header.consensus_hash.clone(), - burn_header_hash: tip_sn.burn_header_hash.clone(), + consensus_hash: tip_b_block.header.consensus_hash, + burn_header_hash: tip_sn.burn_header_hash, burn_header_height: tip_sn.block_height as u32, burn_header_timestamp: tip_sn.burn_header_timestamp, anchored_block_size: tip_b_block.serialize_to_vec().len() as u64, - burn_view: Some(tip_b_block.header.consensus_hash.clone()), + burn_view: Some(tip_b_block.header.consensus_hash), }; let blocks = test_observer::get_mined_nakamoto_blocks(); @@ -1075,19 +1095,15 @@ fn forked_tenure_testing( proposed_blocks.load(Ordering::SeqCst) }; let rejected_before = rejected_blocks.load(Ordering::SeqCst); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .set(false); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, || { - signer_test - .running_nodes - .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - .replace(false); - let commits_count = commits_submitted.load(Ordering::SeqCst); if commits_count > commits_before { // now allow block B to process if it hasn't already. 
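Note: the .set(true) / .set(false) calls above replace the old .0.lock().unwrap().replace(...) chains on the skip-commit test flag. A minimal sketch of the kind of wrapper that enables this; the real type lives in the node's Counters, so this shape is an assumption, not a quote:

    use std::sync::{Arc, Mutex};

    /// Assumed shape: a shared test flag with a `set` convenience method.
    #[derive(Clone, Default)]
    pub struct TestFlag(pub Arc<Mutex<Option<bool>>>);

    impl TestFlag {
        /// Equivalent to the old call sites' `.0.lock().unwrap().replace(value)`.
        pub fn set(&self, value: bool) {
            self.0.lock().unwrap().replace(value);
        }

        /// Reads the flag, defaulting to `false` when it was never set.
        pub fn get(&self) -> bool {
            self.0.lock().unwrap().unwrap_or(false)
        }
    }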
@@ -1114,7 +1130,31 @@ fn forked_tenure_testing( && has_reject_count) }, ) - .unwrap(); + .unwrap_or_else(|_| { + let commits_count = commits_submitted.load(Ordering::SeqCst); + let rejected_count = rejected_blocks.load(Ordering::SeqCst); + // see above for comments + let (blocks_count, rbf_count, has_reject_count) = if expect_tenure_c { + (mined_blocks.load(Ordering::SeqCst), 1, true) + } else { + ( + proposed_blocks.load(Ordering::SeqCst), + 0, + rejected_count > rejected_before, + ) + }; + error!("Tenure C failed to produce a block"; + "commits_count" => commits_count, + "commits_before" => commits_before, + "rbf_count" => rbf_count as u64, + "blocks_count" => blocks_count, + "blocks_before" => blocks_before, + "rejected_count" => rejected_count, + "rejected_before" => rejected_before, + "has_reject_count" => has_reject_count, + ); + panic!(); + }); // allow blocks B and C to be processed sleep_ms(1000); @@ -1227,10 +1267,8 @@ fn bitcoind_forking_test() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); let conf = signer_test.running_nodes.conf.clone(); let http_origin = format!("http://{}", &conf.node.rpc_bind); let miner_address = Keychain::default(conf.node.seed.clone()) @@ -1339,7 +1377,7 @@ fn bitcoind_forking_test() { let post_fork_1_nonce = get_account(&http_origin, &miner_address).nonce; - assert_eq!(post_fork_1_nonce, pre_fork_1_nonce - 1 * 2); + assert_eq!(post_fork_1_nonce, pre_fork_1_nonce - 2); for i in 0..5 { info!("Mining post-fork tenure {} of 5", i + 1); @@ -1460,12 +1498,13 @@ fn multiple_miners() { let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); let mut node_2_listeners = Vec::new(); + let max_nakamoto_tenures = 30; // partition the signer set so that ~half are listening and using node 1 for RPC and events, // and the rest are using node 2 let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -1481,6 +1520,7 @@ fn multiple_miners() { config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -1502,7 +1542,7 @@ fn multiple_miners() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let conf = signer_test.running_nodes.conf.clone(); @@ -1523,7 +1563,7 @@ fn multiple_miners() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -1560,8 +1600,6 @@ fn multiple_miners() { info!("------------------------- Reached Epoch 3.0 -------------------------"); - let max_nakamoto_tenures = 
20; - // due to the random nature of mining sortitions, the way this test is structured // is that we keep track of how many tenures each miner produced, and once enough sortitions // have been produced such that each miner has produced 3 tenures, we stop and check the @@ -1583,10 +1621,7 @@ fn multiple_miners() { let info_1 = get_chain_info(&conf); let info_2 = get_chain_info(&conf_node_2); - info!( - "Issue next block-build request\ninfo 1: {:?}\ninfo 2: {:?}\n", - &info_1, &info_2 - ); + info!("Issue next block-build request\ninfo 1: {info_1:?}\ninfo 2: {info_2:?}\n"); signer_test.mine_block_wait_on_processing( &[&rl1_coord_channels, &rl2_coord_channels], @@ -1597,10 +1632,8 @@ fn multiple_miners() { btc_blocks_mined += 1; let blocks = get_nakamoto_headers(&conf); // for this test, there should be one block per tenure - let consensus_hash_set: HashSet<_> = blocks - .iter() - .map(|header| header.consensus_hash.clone()) - .collect(); + let consensus_hash_set: HashSet<_> = + blocks.iter().map(|header| header.consensus_hash).collect(); assert_eq!( consensus_hash_set.len(), blocks.len(), @@ -1645,11 +1678,11 @@ fn multiple_miners() { assert_eq!(peer_1_height, peer_2_height); assert_eq!( peer_1_height, - pre_nakamoto_peer_1_height + btc_blocks_mined - 1 + pre_nakamoto_peer_1_height + btc_blocks_mined as u64 - 1 ); assert_eq!( btc_blocks_mined, - u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() + u32::try_from(miner_1_tenures + miner_2_tenures).unwrap() ); rl2_coord_channels @@ -1664,17 +1697,10 @@ fn multiple_miners() { /// Read processed nakamoto block IDs from the test observer, and use `config` to open /// a chainstate DB and returns their corresponding StacksHeaderInfos fn get_nakamoto_headers(config: &Config) -> Vec { - let nakamoto_block_ids: Vec<_> = test_observer::get_blocks() + let nakamoto_block_ids: HashSet<_> = test_observer::get_blocks() .into_iter() .filter_map(|block_json| { - if block_json - .as_object() - .unwrap() - .get("miner_signature") - .is_none() - { - return None; - } + block_json.as_object().unwrap().get("miner_signature")?; let block_id = StacksBlockId::from_hex( &block_json .as_object() @@ -1746,12 +1772,14 @@ fn miner_forking() { let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); let mut node_2_listeners = Vec::new(); + let max_sortitions = 30; + // partition the signer set so that ~half are listening and using node 1 for RPC and events, // and the rest are using node 2 let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -1776,6 +1804,9 @@ fn miner_forking() { config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); config.node.pox_sync_sample_secs = 30; + config.burnchain.pox_reward_length = Some(max_sortitions as u32); + config.miner.block_commit_delay = Duration::from_secs(0); + config.miner.tenure_cost_limit_per_block_percentage = None; config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { @@ -1792,16 +1823,15 @@ fn miner_forking() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); - let localhost = 
"127.0.0.1"; - conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); - conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.rpc_bind = node_2_rpc_bind; + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); conf_node_2.node.seed = btc_miner_2_seed.clone(); conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); @@ -1814,7 +1844,7 @@ fn miner_forking() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -1824,8 +1854,8 @@ fn miner_forking() { let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); let Counters { - naka_skip_commit_op, - naka_submitted_commits: second_miner_commits_submitted, + naka_skip_commit_op: skip_commit_op_rl2, + naka_submitted_commits: commits_submitted_rl2, .. } = run_loop_2.counters(); let _run_loop_2_thread = thread::Builder::new() @@ -1846,152 +1876,273 @@ fn miner_forking() { }) .expect("Timed out waiting for boostrapped node to catch up to the miner"); + let commits_submitted_rl1 = signer_test.running_nodes.commits_submitted.clone(); + let skip_commit_op_rl1 = signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .clone(); + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; - naka_skip_commit_op.0.lock().unwrap().replace(false); + let mining_pk_1 = StacksPublicKey::from_private(&conf.miner.mining_key.unwrap()); + let mining_pk_2 = StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap()); + let mining_pkh_1 = Hash160::from_node_public_key(&mining_pk_1); + let mining_pkh_2 = Hash160::from_node_public_key(&mining_pk_2); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + + let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap(); + let get_burn_height = || { + SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) + .unwrap() + .block_height + }; info!("------------------------- Reached Epoch 3.0 -------------------------"); - let mut sortitions_seen = Vec::new(); - let run_sortition = || { - info!("Pausing stacks block proposal to force an empty tenure commit from RL2"); - TEST_BROADCAST_STALL.lock().unwrap().replace(true); + info!("Pausing both miners' block commit submissions"); + skip_commit_op_rl1.set(true); + skip_commit_op_rl2.set(true); - let rl2_commits_before = second_miner_commits_submitted.load(Ordering::SeqCst); - let rl1_commits_before = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); + info!("Flushing any pending commits to enable custom winner selection"); + let burn_height_before = get_burn_height(); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); - signer_test - .running_nodes - 
.btc_regtest_controller
-            .build_next_block(1);
-        naka_skip_commit_op.0.lock().unwrap().replace(false);
+    info!("------------------------- RL1 Wins Sortition -------------------------");
+    info!("Pausing stacks block proposal to force an empty tenure commit from RL2");
+    TEST_BROADCAST_STALL.lock().unwrap().replace(true);
+    let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst);

-        // wait until a commit is submitted by run_loop_2
-        wait_for(60, || {
-            let commits_count = second_miner_commits_submitted.load(Ordering::SeqCst);
-            Ok(commits_count > rl2_commits_before)
-        })
-        .unwrap();
-        // wait until a commit is submitted by run_loop_1
-        wait_for(60, || {
-            let commits_count = signer_test
-                .running_nodes
-                .commits_submitted
-                .load(Ordering::SeqCst);
-            Ok(commits_count > rl1_commits_before)
-        })
+    info!("Unpausing commits from RL1");
+    skip_commit_op_rl1.set(false);
+
+    info!("Waiting for commits from RL1");
+    wait_for(30, || {
+        Ok(commits_submitted_rl1.load(Ordering::SeqCst) > rl1_commits_before)
+    })
+    .expect("Timed out waiting for miner 1 to submit a commit op");
+
+    info!("Pausing commits from RL1");
+    skip_commit_op_rl1.set(true);
+
+    let burn_height_before = get_burn_height();
+    info!("Mine RL1 Tenure");
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        30,
+        || Ok(get_burn_height() > burn_height_before),
+    )
+    .unwrap();
+
+    // fetch the current sortition info
+    let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap();
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    // make sure the tenure was won by RL1
+    assert!(tip.sortition, "No sortition was won");
+    assert_eq!(
+        tip.miner_pk_hash.unwrap(),
+        mining_pkh_1,
+        "RL1 did not win the sortition"
+    );
+
+    info!(
+        "------------------------- RL2 Wins Sortition With Outdated View -------------------------"
+    );
+    let rl2_commits_before = commits_submitted_rl2.load(Ordering::SeqCst);
+
+    info!("Unpausing commits from RL2");
+    skip_commit_op_rl2.set(false);
+
+    info!("Waiting for commits from RL2");
+    wait_for(30, || {
+        Ok(commits_submitted_rl2.load(Ordering::SeqCst) > rl2_commits_before)
+    })
+    .expect("Timed out waiting for miner 2 to submit a commit op");
+
+    info!("Pausing commits from RL2");
+    skip_commit_op_rl2.set(true);
+
+    // unblock block mining
+    let blocks_len = test_observer::get_blocks().len();
+    TEST_BROADCAST_STALL.lock().unwrap().replace(false);
+
+    // Wait for the block to be broadcasted and processed
+    wait_for(30, || Ok(test_observer::get_blocks().len() > blocks_len))
+        .expect("Timed out waiting for a block to be processed");
+
+    // sleep for 2*first_proposal_burn_block_timing to prevent the block timing from allowing a fork by the signer set
+    thread::sleep(Duration::from_secs(first_proposal_burn_block_timing * 2));
+
+    let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf)
+        .into_iter()
+        .map(|header| {
+            info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %tip.consensus_hash);
+            (header.consensus_hash, header)
+        })
+        .collect();
+
+    let header_info = nakamoto_headers.get(&tip.consensus_hash).unwrap();
+    let header = header_info
+        .anchored_header
+        .as_stacks_nakamoto()
+        .unwrap()
+        .clone();
+
+    mining_pk_1
+        .verify(
+            header.miner_signature_hash().as_bytes(),
+            &header.miner_signature,
+        )
         .unwrap();

-        // fetch the current sortition info
-        let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap();
-        let sort_tip =
SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let burn_height_before = get_burn_height(); + info!("Mine RL2 Tenure"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); - // block commits from RL2 -- this will block until the start of the next iteration - // in this loop. - naka_skip_commit_op.0.lock().unwrap().replace(true); - // ensure RL1 performs an RBF after unblock block broadcast - let rl1_commits_before = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); + wait_for(60, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("RL1 did not produce a tenure extend block"); - // unblock block mining - let blocks_len = test_observer::get_blocks().len(); - TEST_BROADCAST_STALL.lock().unwrap().replace(false); + // fetch the current sortition info + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // make sure the tenure was won by RL2 + assert!(tip.sortition, "No sortition was won"); + assert_eq!( + tip.miner_pk_hash.unwrap(), + mining_pkh_2, + "RL2 did not win the sortition" + ); - // wait for a block to be processed (or timeout!) - if let Err(_) = wait_for(60, || Ok(test_observer::get_blocks().len() > blocks_len)) { - info!("Timeout waiting for a block process: assuming this is because RL2 attempted to fork-- will check at end of test"); - return (sort_tip, false); - } + let header_info = get_nakamoto_headers(&conf).into_iter().last().unwrap(); + let header = header_info + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .clone(); - info!("Nakamoto block processed, waiting for commit from RL1"); + mining_pk_1 + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .expect("RL1 did not produce our last block"); - // wait for a commit from RL1 - wait_for(60, || { - let commits_count = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - Ok(commits_count > rl1_commits_before) + let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) + .into_iter() + .map(|header| { + info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %tip.consensus_hash); + (header.consensus_hash, header) }) - .unwrap(); + .collect(); - // sleep for 2*first_proposal_burn_block_timing to prevent the block timing from allowing a fork by the signer set - thread::sleep(Duration::from_secs(first_proposal_burn_block_timing * 2)); - (sort_tip, true) - }; + assert!( + !nakamoto_headers.contains_key(&tip.consensus_hash), + "RL1 produced a block with the current consensus hash." + ); - let mut won_by_miner_2_but_no_tenure = false; - let mut won_by_miner_1_after_tenureless_miner_2 = false; - let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); - // miner 2 is expected to be valid iff: - // (a) its the first nakamoto tenure - // (b) the prior sortition didn't have a tenure (because by this time RL2 will have up-to-date block processing) - let mut expects_miner_2_to_be_valid = true; - let max_sortitions = 20; - // due to the random nature of mining sortitions, the way this test is structured - // is that keeps track of two scenarios that we want to cover, and once enough sortitions - // have been produced to cover those scenarios, it stops and checks the results at the end. 
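Note: the HashMap collections above lean on get_nakamoto_headers, which (per the earlier hunk) now gathers block IDs into a HashSet to deduplicate, and drops observer blocks lacking a miner_signature by using ? inside the filter_map closure; ? on Option works in any closure that returns Option. A stand-alone sketch of the idiom, with illustrative JSON key names (the exact keys are assumptions):

    use std::collections::HashSet;
    use serde_json::{json, Value};

    fn signed_block_ids(blocks: &[Value]) -> HashSet<String> {
        blocks
            .iter()
            .filter_map(|block| {
                block.get("miner_signature")?; // `?` returns None, skipping unsigned blocks
                Some(block.get("index_block_hash")?.as_str()?.to_string())
            })
            .collect()
    }

    fn main() {
        let blocks = vec![
            json!({ "index_block_hash": "0xaa", "miner_signature": "0x01" }),
            json!({ "burn_block_hash": "0xbb" }), // no signature: dropped
        ];
        assert_eq!(signed_block_ids(&blocks).len(), 1);
    }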
- while !(won_by_miner_2_but_no_tenure && won_by_miner_1_after_tenureless_miner_2) { - let nmb_sortitions_seen = sortitions_seen.len(); - assert!(max_sortitions >= nmb_sortitions_seen, "Produced {nmb_sortitions_seen} sortitions, but didn't cover the test scenarios, aborting"); - let (sortition_data, had_tenure) = run_sortition(); - sortitions_seen.push((sortition_data.clone(), had_tenure)); - - let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) - .into_iter() - .map(|header| { - info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %sortition_data.consensus_hash); - (header.consensus_hash.clone(), header) - }) - .collect(); + info!("------------------------- RL1 RBFs its Own Commit -------------------------"); + info!("Pausing stacks block proposal to test RBF capability"); + TEST_BROADCAST_STALL.lock().unwrap().replace(true); + let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); - if had_tenure { - let header_info = nakamoto_headers - .get(&sortition_data.consensus_hash) - .unwrap(); - let header = header_info - .anchored_header - .as_stacks_nakamoto() - .unwrap() - .clone(); - let mined_by_miner_1 = miner_1_pk - .verify( - header.miner_signature_hash().as_bytes(), - &header.miner_signature, - ) - .unwrap(); + info!("Unpausing commits from RL1"); + skip_commit_op_rl1.set(false); - info!("Block check"; - "height" => header.chain_length, - "consensus_hash" => %header.consensus_hash, - "block_hash" => %header.block_hash(), - "stacks_block_id" => %header.block_id(), - "mined_by_miner_1?" => mined_by_miner_1, - "expects_miner_2_to_be_valid?" => expects_miner_2_to_be_valid); - if !mined_by_miner_1 { - assert!(expects_miner_2_to_be_valid, "If a block was produced by miner 2, we should have expected miner 2 to be valid"); - } else if won_by_miner_2_but_no_tenure { - // the tenure was won by miner 1, they produced a block, and this follows a tenure that miner 2 won but couldn't - // mine during because they tried to fork. - won_by_miner_1_after_tenureless_miner_2 = true; - } + info!("Waiting for commits from RL1"); + wait_for(30, || { + Ok(commits_submitted_rl1.load(Ordering::SeqCst) > rl1_commits_before) + }) + .expect("Timed out waiting for miner 1 to submit a commit op"); - // even if it was mined by miner 2, their next block commit should be invalid! - expects_miner_2_to_be_valid = false; - } else { - info!("Sortition without tenure"; "expects_miner_2_to_be_valid?" 
=> expects_miner_2_to_be_valid); - assert!(nakamoto_headers - .get(&sortition_data.consensus_hash) - .is_none()); - assert!(!expects_miner_2_to_be_valid, "If no blocks were produced in the tenure, it should be because miner 2 committed to a fork"); - won_by_miner_2_but_no_tenure = true; - expects_miner_2_to_be_valid = true; - } - } + info!("Pausing commits from RL1"); + skip_commit_op_rl1.set(true); + + let burn_height_before = get_burn_height(); + info!("Mine RL1 Tenure"); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + + let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); + + info!("Unpausing commits from RL1"); + skip_commit_op_rl1.set(false); + + info!("Waiting for commits from RL1"); + wait_for(30, || { + Ok(commits_submitted_rl1.load(Ordering::SeqCst) > rl1_commits_before) + }) + .expect("Timed out waiting for miner 1 to submit a commit op"); + + let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); + // unblock block mining + let blocks_len = test_observer::get_blocks().len(); + TEST_BROADCAST_STALL.lock().unwrap().replace(false); + + // Wait for the block to be broadcasted and processed + wait_for(30, || Ok(test_observer::get_blocks().len() > blocks_len)) + .expect("Timed out waiting for a block to be processed"); + + info!("Ensure that RL1 performs an RBF after unblocking block broadcast"); + wait_for(30, || { + Ok(commits_submitted_rl1.load(Ordering::SeqCst) > rl1_commits_before) + }) + .expect("Timed out waiting for miner 1 to RBF its old commit op"); + + info!("Mine RL1 Tenure"); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + + // fetch the current sortition info + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // make sure the tenure was won by RL1 + assert!(tip.sortition, "No sortition was won"); + assert_eq!( + tip.miner_pk_hash.unwrap(), + mining_pkh_1, + "RL1 did not win the sortition" + ); + + let nakamoto_headers: HashMap<_, _> = get_nakamoto_headers(&conf) + .into_iter() + .map(|header| { + info!("Nakamoto block"; "height" => header.stacks_block_height, "consensus_hash" => %header.consensus_hash, "last_sortition_hash" => %tip.consensus_hash); + (header.consensus_hash, header) + }) + .collect(); + + let header_info = nakamoto_headers.get(&tip.consensus_hash).unwrap(); + let header = header_info + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .clone(); + + mining_pk_1 + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap(); + + info!("------------------------- Verify Peer Data -------------------------"); let peer_1_height = get_chain_info(&conf).stacks_tip_height; let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; @@ -2033,10 +2184,8 @@ fn end_of_tenure() { let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let long_timeout = Duration::from_secs(200); let short_timeout = Duration::from_secs(20); @@ -2184,10 +2333,8 @@ fn retry_on_rejection() { let send_fee = 180; let short_timeout = Duration::from_secs(30); let recipient = 
PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * 3)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, (send_amt + send_fee) * 3)]); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); @@ -2197,7 +2344,7 @@ fn retry_on_rejection() { let sortdb = burnchain.open_sortition_db(true).unwrap(); wait_for(30, || { - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); Ok(tip.sortition) }) .expect("Timed out waiting for sortition"); @@ -2323,10 +2470,8 @@ fn signers_broadcast_signed_blocks() { let send_amt = 100; let send_fee = 180; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - ); + let mut signer_test: SignerTest = + SignerTest::new(num_signers, vec![(sender_addr, send_amt + send_fee)]); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); @@ -2344,8 +2489,8 @@ fn signers_broadcast_signed_blocks() { .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); debug!( - "blocks_mined: {},{}, stacks_tip_height: {},{}", - blocks_mined, blocks_before, info.stacks_tip_height, info_before.stacks_tip_height + "blocks_mined: {blocks_mined},{blocks_before}, stacks_tip_height: {},{}", + info.stacks_tip_height, info_before.stacks_tip_height ); Ok(blocks_mined > blocks_before && info.stacks_tip_height > info_before.stacks_tip_height) }) @@ -2387,11 +2532,7 @@ fn signers_broadcast_signed_blocks() { .load(Ordering::SeqCst); let info = get_chain_info(&signer_test.running_nodes.conf); debug!( - "blocks_mined: {},{}, signers_pushed: {},{}, stacks_tip_height: {},{}", - blocks_mined, - blocks_before, - signer_pushed, - signer_pushed_before, + "blocks_mined: {blocks_mined},{blocks_before}, signers_pushed: {signer_pushed},{signer_pushed_before}, stacks_tip_height: {},{}", info.stacks_tip_height, info_before.stacks_tip_height ); @@ -2431,7 +2572,7 @@ fn empty_sortition() { let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |config| { // make the duration long enough that the miner will be marked as malicious config.block_proposal_timeout = block_proposal_timeout; @@ -2498,10 +2639,7 @@ fn empty_sortition() { signer_test .running_nodes .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - .replace(true); + .set(true); let blocks_after = signer_test .running_nodes @@ -2592,8 +2730,13 @@ fn empty_sortition() { #[test] #[ignore] -/// This test checks that Epoch 2.5 signers will issue a mock signature per burn block they receive. -fn mock_sign_epoch_25() { +/// This test checks the behavior of signers when an empty sortition arrives +/// before the first block of the previous tenure has been approved. +/// Specifically: +/// - The empty sortition will trigger the miner to attempt a tenure extend. 
+/// - Signers will accept the tenure extend and sign subsequent blocks built +/// off the old sortition +fn empty_sortition_before_approval() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -2609,54 +2752,355 @@ fn mock_sign_epoch_25() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let block_proposal_timeout = Duration::from_secs(20); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], - |_| {}, - |node_config| { - node_config.miner.pre_nakamoto_mock_signing = true; - let epochs = node_config.burnchain.epochs.as_mut().unwrap(); - for epoch in epochs.iter_mut() { - if epoch.epoch_id == StacksEpochId::Epoch25 { - epoch.end_height = 251; - } - if epoch.epoch_id == StacksEpochId::Epoch30 { - epoch.start_height = 251; - } - } + vec![(sender_addr, send_amt + send_fee)], + |config| { + // make the duration long enough that the miner will be marked as malicious + config.block_proposal_timeout = block_proposal_timeout; }, + |_| {}, None, None, ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let epochs = signer_test - .running_nodes - .conf - .burnchain - .epochs - .clone() - .unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; - let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the boundary as epoch 2.5 miner gets torn down at the boundary - - signer_test.boot_to_epoch_25_reward_cycle(); + signer_test.boot_to_epoch_3(); - info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .unwrap(); - // Mine until epoch 3.0 and ensure that no more mock signatures are received - let reward_cycle = signer_test.get_current_reward_cycle(); - let signer_slot_ids: Vec<_> = signer_test - .get_signer_indices(reward_cycle) - .iter() - .map(|id| id.0) - .collect(); - let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); - assert_eq!(signer_slot_ids.len(), num_signers); + let info = get_chain_info(&signer_test.running_nodes.conf); + let burn_height_before = info.burn_block_height; + let stacks_height_before = info.stacks_tip_height; - let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); + info!("Forcing miner to ignore signatures for next block"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); - // Mine until epoch 3.0 and ensure we get a new mock block per epoch 2.5 sortition + info!("Pausing block commits to trigger an empty sortition."); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(true); + + info!("------------------------- Test Mine Tenure A -------------------------"); + let proposed_before = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + // Mine a regular tenure and wait for a block proposal + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let proposed_count = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + Ok(proposed_count > proposed_before) + }, + ) + .expect("Failed to mine tenure A and propose a block"); + + info!("------------------------- Test Mine Empty Tenure 
B -------------------------"); + + // Trigger an empty tenure + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let burn_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + Ok(burn_height == burn_height_before + 2) + }, + ) + .expect("Failed to mine empty tenure"); + + info!("Unpause block commits"); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(false); + + info!("Stop ignoring signers and wait for the tip to advance"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); + + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip"); + + let info = get_chain_info(&signer_test.running_nodes.conf); + info!("Current state: {:?}", info); + + // Wait for a block with a tenure extend to be mined + wait_for(60, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for tenure extend"); + + let stacks_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip with STX transfer"); + + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .expect("Failed to mine a normal tenure after the tenure extend"); + + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// This test checks the behavior of signers when an empty sortition arrives +/// before the first block of the previous tenure has been proposed. +/// Specifically: +/// - The empty sortition will trigger the miner to attempt a tenure extend. 
+/// - Signers will accept the tenure extend and sign subsequent blocks built +/// off the old sortition +fn empty_sortition_before_proposal() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let block_proposal_timeout = Duration::from_secs(20); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + // make the duration long enough that the miner will be marked as malicious + config.block_proposal_timeout = block_proposal_timeout; + }, + |_| {}, + None, + None, + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .unwrap(); + + let info = get_chain_info(&signer_test.running_nodes.conf); + let stacks_height_before = info.stacks_tip_height; + + info!("Pause block commits to ensure we get an empty sortition"); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(true); + + info!("Pause miner so it doesn't propose a block before the next tenure arrives"); + TEST_MINE_STALL.lock().unwrap().replace(true); + + let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height; + + info!("------------------------- Test Mine Tenure A and B -------------------------"); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(2); + + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.burn_block_height == burn_height_before + 2) + }) + .expect("Failed to advance chain tip"); + + // Sleep a bit more to ensure the signers see both burn blocks + sleep_ms(5_000); + + info!("Unpause miner"); + TEST_MINE_STALL.lock().unwrap().replace(false); + + info!("Unpause block commits"); + signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .0 + .lock() + .unwrap() + .replace(false); + + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip"); + + let info = get_chain_info(&signer_test.running_nodes.conf); + info!("Current state: {:?}", info); + + // Wait for a block with a tenure extend to be mined + wait_for(60, || { + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + info!("Last block mined: {:?}", last_block); + for tx in last_block["transactions"].as_array().unwrap() { + let raw_tx = tx["raw_tx"].as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::TenureChange(payload) = &parsed.payload { + match payload.cause { + TenureChangeCause::Extended => { + info!("Found tenure extend block"); + return Ok(true); + } + TenureChangeCause::BlockFound => {} + } + }; + } + Ok(false) + }) + .expect("Timed 
out waiting for tenure extend"); + + let stacks_height_before = get_chain_info(&signer_test.running_nodes.conf).stacks_tip_height; + + // submit a tx so that the miner will mine an extra block + let sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + wait_for(60, || { + let info = get_chain_info(&signer_test.running_nodes.conf); + Ok(info.stacks_tip_height > stacks_height_before) + }) + .expect("Failed to advance chain tip with STX transfer"); + + next_block_and_process_new_stacks_block( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + &signer_test.running_nodes.coord_channel, + ) + .expect("Failed to mine a normal tenure after the tenure extend"); + + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// This test checks that Epoch 2.5 signers will issue a mock signature per burn block they receive. +fn mock_sign_epoch_25() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |_| {}, + |node_config| { + node_config.miner.pre_nakamoto_mock_signing = true; + let epochs = node_config.burnchain.epochs.as_mut().unwrap(); + epochs[StacksEpochId::Epoch25].end_height = 251; + epochs[StacksEpochId::Epoch30].start_height = 251; + }, + None, + None, + ); + + let epochs = signer_test + .running_nodes + .conf + .burnchain + .epochs + .clone() + .unwrap(); + let epoch_3 = &epochs[StacksEpochId::Epoch30]; + let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the boundary as epoch 2.5 miner gets torn down at the boundary + + signer_test.boot_to_epoch_25_reward_cycle(); + + info!("------------------------- Test Processing Epoch 2.5 Tenures -------------------------"); + + // Mine until epoch 3.0 and ensure that no more mock signatures are received + let reward_cycle = signer_test.get_current_reward_cycle(); + let signer_slot_ids: Vec<_> = signer_test + .get_signer_indices(reward_cycle) + .iter() + .map(|id| id.0) + .collect(); + let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); + assert_eq!(signer_slot_ids.len(), num_signers); + + let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); + + // Mine until epoch 3.0 and ensure we get a new mock block per epoch 2.5 sortition let main_poll_time = Instant::now(); // Only advance to the boundary as the epoch 2.5 miner will be shut down at this point. 
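+ // Mock blocks are published to the `.miners` boot contract's StackerDB + // (`boot_code_id(MINERS_NAME, false)` above); the polling loop below reads them from + // those slots, bounded by `main_poll_time` until `epoch_3_boundary` is reached.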
while signer_test @@ -2762,7 +3206,7 @@ fn multiple_miners_mock_sign_epoch_25() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, send_amt + send_fee)], |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -2783,14 +3227,8 @@ fn multiple_miners_mock_sign_epoch_25() { config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); config.miner.pre_nakamoto_mock_signing = true; let epochs = config.burnchain.epochs.as_mut().unwrap(); - for epoch in epochs.iter_mut() { - if epoch.epoch_id == StacksEpochId::Epoch25 { - epoch.end_height = 251; - } - if epoch.epoch_id == StacksEpochId::Epoch30 { - epoch.start_height = 251; - } - } + epochs[StacksEpochId::Epoch25].end_height = 251; + epochs[StacksEpochId::Epoch30].start_height = 251; config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else { warn!( @@ -2806,16 +3244,16 @@ fn multiple_miners_mock_sign_epoch_25() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); let localhost = "127.0.0.1"; - conf_node_2.node.rpc_bind = format!("{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_bind = format!("{}:{}", localhost, node_2_p2p); - conf_node_2.node.data_url = format!("http://{}:{}", localhost, node_2_rpc); - conf_node_2.node.p2p_address = format!("{}:{}", localhost, node_2_p2p); + conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); conf_node_2.node.seed = btc_miner_2_seed.clone(); conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex()); conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone(); @@ -2828,7 +3266,7 @@ fn multiple_miners_mock_sign_epoch_25() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -2849,7 +3287,7 @@ fn multiple_miners_mock_sign_epoch_25() { .epochs .clone() .unwrap(); - let epoch_3 = &epochs[StacksEpoch::find_epoch_by_id(&epochs, StacksEpochId::Epoch30).unwrap()]; + let epoch_3 = &epochs[StacksEpochId::Epoch30]; let epoch_3_boundary = epoch_3.start_height - 1; // We only advance to the boundary as epoch 2.5 miner gets torn down at the boundary signer_test.boot_to_epoch_25_reward_cycle(); @@ -2952,17 +3390,13 @@ fn signer_set_rollover() { let new_num_signers = 4; let new_signer_private_keys: Vec<_> = (0..new_num_signers) - .into_iter() .map(|_| StacksPrivateKey::new()) .collect(); let new_signer_public_keys: Vec<_> = new_signer_private_keys .iter() .map(|sk| Secp256k1PublicKey::from_private(sk).to_bytes_compressed()) .collect(); - let new_signer_addresses: Vec<_> = new_signer_private_keys - .iter() - .map(|sk| tests::to_addr(sk)) - .collect(); + let new_signer_addresses: Vec<_> = new_signer_private_keys.iter().map(tests::to_addr).collect(); let 
sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; @@ -2971,15 +3405,15 @@ fn signer_set_rollover() { let mut initial_balances = new_signer_addresses .iter() - .map(|addr| (addr.clone(), POX_4_DEFAULT_STACKER_BALANCE)) + .map(|addr| (*addr, POX_4_DEFAULT_STACKER_BALANCE)) .collect::>(); - initial_balances.push((sender_addr.clone(), (send_amt + send_fee) * 4)); + initial_balances.push((sender_addr, (send_amt + send_fee) * 4)); let run_stamp = rand::random(); let rpc_port = 51024; - let rpc_bind = format!("127.0.0.1:{}", rpc_port); + let rpc_bind = format!("127.0.0.1:{rpc_port}"); // Setup the new signers that will take over let new_signer_configs = build_signer_config_tomls( @@ -2996,12 +3430,11 @@ fn signer_set_rollover() { None, ); - let new_spawned_signers: Vec<_> = (0..new_num_signers) - .into_iter() - .map(|i| { + let new_spawned_signers: Vec<_> = new_signer_configs + .iter() + .map(|conf| { info!("spawning signer"); - let signer_config = - SignerConfig::load_from_str(&new_signer_configs[i as usize]).unwrap(); + let signer_config = SignerConfig::load_from_str(conf).unwrap(); SpawnedSigner::new(signer_config) }) .collect(); @@ -3046,7 +3479,7 @@ fn signer_set_rollover() { // Verify that naka_conf has our new signer's event observers for toml in &new_signer_configs { - let signer_config = SignerConfig::load_from_str(&toml).unwrap(); + let signer_config = SignerConfig::load_from_str(toml).unwrap(); let endpoint = format!("{}", signer_config.endpoint); assert!(signer_test .running_nodes @@ -3071,7 +3504,7 @@ fn signer_set_rollover() { info!("---- Verifying that the current signers are the old signers ----"); let current_signers = signer_test.get_reward_set_signers(reward_cycle); - assert_eq!(current_signers.len(), num_signers as usize); + assert_eq!(current_signers.len(), num_signers); // Verify that the current signers are the same as the old signers for signer in current_signers.iter() { assert!(signer_test_public_keys.contains(&signer.signing_key.to_vec())); @@ -3116,13 +3549,13 @@ fn signer_set_rollover() { for stacker_sk in new_signer_private_keys.iter() { let pox_addr = PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, - tests::to_addr(&stacker_sk).bytes, + tests::to_addr(stacker_sk).bytes, ); let pox_addr_tuple: clarity::vm::Value = pox_addr.clone().as_clarity_tuple().unwrap().into(); let signature = make_pox_4_signer_key_signature( &pox_addr, - &stacker_sk, + stacker_sk, reward_cycle.into(), &Pox4SignatureTopic::StackStx, CHAIN_ID_TESTNET, @@ -3135,7 +3568,7 @@ fn signer_set_rollover() { let signer_pk = Secp256k1PublicKey::from_private(stacker_sk); let stacking_tx = tests::make_contract_call( - &stacker_sk, + stacker_sk, 0, 1000, signer_test.running_nodes.conf.burnchain.chain_id, @@ -3189,10 +3622,7 @@ fn signer_set_rollover() { assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); } - info!( - "---- Mining to the next reward cycle (block {}) -----", - next_cycle_height - ); + info!("---- Mining to the next reward cycle (block {next_cycle_height}) -----",); signer_test.run_until_burnchain_height_nakamoto( Duration::from_secs(60), next_cycle_height, @@ -3203,7 +3633,7 @@ fn signer_set_rollover() { info!("---- Verifying that the current signers are the new signers ----"); let current_signers = signer_test.get_reward_set_signers(new_reward_cycle); - assert_eq!(current_signers.len(), new_num_signers as usize); + assert_eq!(current_signers.len(), new_num_signers); for signer in current_signers.iter() { 
assert!(!signer_test_public_keys.contains(&signer.signing_key.to_vec())); assert!(new_signer_public_keys.contains(&signer.signing_key.to_vec())); @@ -3242,7 +3672,8 @@ fn signer_set_rollover() { #[test] #[ignore] -/// This test checks that the signers will broadcast a block once they receive enough signatures. +/// This test checks that the miners and signers will not produce Nakamoto blocks +/// until the minimum time has passed between blocks. fn min_gap_between_blocks() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; @@ -3259,11 +3690,13 @@ fn min_gap_between_blocks() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; + + let interim_blocks = 5; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let time_between_blocks_ms = 10_000; let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), send_amt + send_fee)], + vec![(sender_addr, (send_amt + send_fee) * interim_blocks)], |_config| {}, |config| { config.miner.min_time_between_blocks_ms = time_between_blocks_ms; @@ -3276,73 +3709,80 @@ fn min_gap_between_blocks() { signer_test.boot_to_epoch_3(); - info!("Ensure that the first Nakamoto block is mined after the gap is exceeded"); + info!("Ensure that the first Nakamoto block was mined"); let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); assert_eq!(blocks.len(), 1); - let first_block = blocks.last().unwrap(); - let blocks = test_observer::get_blocks(); - let parent = blocks - .iter() - .find(|b| b.get("block_height").unwrap() == first_block.stacks_block_height - 1) - .unwrap(); - let first_block_time = first_block - .anchored_header - .as_stacks_nakamoto() - .unwrap() - .timestamp; - let parent_block_time = parent.get("burn_block_time").unwrap().as_u64().unwrap(); - assert!( - Duration::from_secs(first_block_time - parent_block_time) - >= Duration::from_millis(time_between_blocks_ms), - "First block proposed before gap was exceeded: {}s - {}s > {}ms", - first_block_time, - parent_block_time, - time_between_blocks_ms - ); + // mine the interim blocks + info!("Mining interim blocks"); + for interim_block_ix in 0..interim_blocks { + let blocks_processed_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + interim_block_ix, // same as the sender nonce + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); - // Submit a tx so that the miner will mine a block - let sender_nonce = 0; - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - submit_tx(&http_origin, &transfer_tx); + info!("Submitted transfer tx and waiting for block to be processed"); + wait_for(60, || { + let blocks_processed = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + info!("Mined interim block:{interim_block_ix}"); + } - info!("Submitted transfer tx and waiting for block to be processed. 
Ensure it does not arrive before the gap is exceeded"); wait_for(60, || { - let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); - Ok(blocks.len() >= 2) + let new_blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); + Ok(new_blocks.len() == blocks.len() + interim_blocks as usize) }) .unwrap(); - // Verify that the second Nakamoto block is mined after the gap is exceeded - let blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); - let last_block = blocks.last().unwrap(); - let last_block_time = last_block - .anchored_header - .as_stacks_nakamoto() - .unwrap() - .timestamp; - assert!(blocks.len() >= 2, "Expected at least 2 mined blocks"); - let penultimate_block = blocks.get(blocks.len() - 2).unwrap(); - let penultimate_block_time = penultimate_block - .anchored_header - .as_stacks_nakamoto() - .unwrap() - .timestamp; - assert!( - Duration::from_secs(last_block_time - penultimate_block_time) - >= Duration::from_millis(time_between_blocks_ms), - "Block proposed before gap was exceeded: {}s - {}s > {}ms", - last_block_time, - penultimate_block_time, - time_between_blocks_ms - ); - + // Verify that every Nakamoto block is mined after the gap is exceeded between each + let mut blocks = get_nakamoto_headers(&signer_test.running_nodes.conf); + blocks.sort_by(|a, b| a.stacks_block_height.cmp(&b.stacks_block_height)); + for i in 1..blocks.len() { + let block = &blocks[i]; + let parent_block = &blocks[i - 1]; + assert_eq!( + block.stacks_block_height, + parent_block.stacks_block_height + 1 + ); + info!( + "Checking that the time between blocks {} and {} is respected", + parent_block.stacks_block_height, block.stacks_block_height + ); + let block_time = block + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .timestamp; + let parent_block_time = parent_block + .anchored_header + .as_stacks_nakamoto() + .unwrap() + .timestamp; + assert!( + block_time > parent_block_time, + "Block time is BEFORE parent block time" + ); + assert!( + Duration::from_secs(block_time - parent_block_time) + >= Duration::from_millis(time_between_blocks_ms), + "Block mined before gap was exceeded: {block_time}s - {parent_block_time}s > {time_between_blocks_ms}ms", + ); + } + debug!("Shutting down min_gap_between_blocks test"); signer_test.shutdown(); } @@ -3413,7 +3853,7 @@ fn duplicate_signers() { }) .filter_map(|message| match message { SignerMessage::BlockResponse(BlockResponse::Accepted(m)) => { - info!("Message(accepted): {:?}", &m); + info!("Message(accepted): {m:?}"); Some(m) } _ => { @@ -3490,7 +3930,7 @@ fn multiple_miners_with_nakamoto_blocks() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![( - sender_addr.clone(), + sender_addr, (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, )], |signer_config| { @@ -3529,7 +3969,7 @@ fn multiple_miners_with_nakamoto_blocks() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); @@ -3552,7 +3992,7 @@ fn multiple_miners_with_nakamoto_blocks() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), 
conf.node.p2p_bind), @@ -3654,10 +4094,7 @@ fn multiple_miners_with_nakamoto_blocks() { Ok(blocks_processed > blocks_processed_before) }) .unwrap(); - info!( - "Mined interim block {}:{}", - btc_blocks_mined, interim_block_ix - ); + info!("Mined interim block {btc_blocks_mined}:{interim_block_ix}"); } let blocks = get_nakamoto_headers(&conf); @@ -3668,7 +4105,7 @@ fn multiple_miners_with_nakamoto_blocks() { if seen_burn_hashes.contains(&header.burn_header_hash) { continue; } - seen_burn_hashes.insert(header.burn_header_hash.clone()); + seen_burn_hashes.insert(header.burn_header_hash); let header = header.anchored_header.as_stacks_nakamoto().unwrap(); if miner_1_pk @@ -3690,10 +4127,7 @@ fn multiple_miners_with_nakamoto_blocks() { miner_2_tenures += 1; } } - info!( - "Miner 1 tenures: {}, Miner 2 tenures: {}", - miner_1_tenures, miner_2_tenures - ); + info!("Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}"); } info!( @@ -3711,10 +4145,7 @@ fn multiple_miners_with_nakamoto_blocks() { peer_1_height, pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) ); - assert_eq!( - btc_blocks_mined, - u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() - ); + assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); rl2_coord_channels .lock() .expect("Mutex poisoned") @@ -3764,11 +4195,12 @@ fn partial_tenure_fork() { let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, vec![( - sender_addr.clone(), + sender_addr, (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, )], |signer_config| { signer_config.node_host = node_1_rpc_bind.clone(); + signer_config.first_proposal_burn_block_timing = Duration::from_secs(0); }, |config| { config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); @@ -3777,6 +4209,7 @@ fn partial_tenure_fork() { config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); config.node.pox_sync_sample_secs = 30; + config.miner.block_commit_delay = Duration::from_secs(0); config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -3791,15 +4224,15 @@ fn partial_tenure_fork() { // Move epoch 2.5 and 3.0 earlier, so we have more time for the // test before re-stacking is required. 
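+ // Note: indexing `epochs` by `StacksEpochId` below (rather than by raw position) relies + // on the epoch list's `Index<StacksEpochId>` impl and keeps the test independent of the + // epoch table's ordering; heights 131 and 166 are arbitrary test boundaries.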
if let Some(epochs) = config.burnchain.epochs.as_mut() { - epochs[6].end_height = 131; - epochs[7].start_height = 131; - epochs[7].end_height = 166; - epochs[8].start_height = 166; + epochs[StacksEpochId::Epoch24].end_height = 131; + epochs[StacksEpochId::Epoch25].start_height = 131; + epochs[StacksEpochId::Epoch25].end_height = 166; + epochs[StacksEpochId::Epoch30].start_height = 166; } else { panic!("Expected epochs to be set"); } }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); @@ -3820,7 +4253,7 @@ fn partial_tenure_fork() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -3828,6 +4261,13 @@ fn partial_tenure_fork() { conf.burnchain.peer_version, ); + let mining_pk_1 = StacksPublicKey::from_private(&conf.miner.mining_key.unwrap()); + let mining_pk_2 = StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap()); + let mining_pkh_1 = Hash160::from_node_public_key(&mining_pk_1); + let mining_pkh_2 = Hash160::from_node_public_key(&mining_pk_2); + debug!("The mining key for miner 1 is {mining_pkh_1}"); + debug!("The mining key for miner 2 is {mining_pkh_2}"); + let http_origin = format!("http://{}", &conf.node.rpc_bind); let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); @@ -3836,6 +4276,8 @@ fn partial_tenure_fork() { let Counters { naka_mined_blocks: blocks_mined2, naka_proposed_blocks: blocks_proposed2, + naka_submitted_commits: commits_2, + naka_skip_commit_op: rl2_skip_commit_op, .. 
} = run_loop_2.counters(); @@ -3875,6 +4317,69 @@ fn partial_tenure_fork() { let mut miner_1_blocks = 0; let mut miner_2_blocks = 0; let mut min_miner_2_blocks = 0; + let mut last_sortition_winner: Option = None; + let mut miner_2_won_2_in_a_row = false; + + let commits_1 = signer_test.running_nodes.commits_submitted.clone(); + let rl1_skip_commit_op = signer_test + .running_nodes + .nakamoto_test_skip_commit_op + .clone(); + + let sortdb = SortitionDB::open( + &conf.get_burn_db_file_path(), + false, + conf.get_burnchain().pox_constants, + ) + .unwrap(); + + info!("-------- Waiting miner 2 to catch up to miner 1 --------"); + + // Wait for miner 2 to catch up to miner 1 + wait_for(60, || { + let info_1 = get_chain_info(&conf); + let info_2 = get_chain_info(&conf_node_2); + Ok(info_1.stacks_tip_height == info_2.stacks_tip_height) + }) + .expect("Timed out waiting for miner 2 to catch up to miner 1"); + + info!("-------- Miner 2 caught up to miner 1 --------"); + + // Pause block commits + rl1_skip_commit_op.set(true); + rl2_skip_commit_op.set(true); + + let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); + let commits_before_1 = commits_1.load(Ordering::SeqCst); + let commits_before_2 = commits_2.load(Ordering::SeqCst); + + // Mine the first block + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 180, + || { + let mined_1 = blocks_mined1.load(Ordering::SeqCst); + let mined_2 = blocks_mined2.load(Ordering::SeqCst); + + Ok(mined_1 > mined_before_1 || mined_2 > mined_before_2) + }, + ) + .expect("Timed out waiting for new Stacks block to be mined"); + + info!("-------- Mined first block, wait for block commits --------"); + + // Unpause block commits and wait for both miners' commits + rl1_skip_commit_op.set(false); + rl2_skip_commit_op.set(false); + + // Ensure that both block commits have been sent before continuing + wait_for(60, || { + let commits_after_1 = commits_1.load(Ordering::SeqCst); + let commits_after_2 = commits_2.load(Ordering::SeqCst); + Ok(commits_after_1 > commits_before_1 && commits_after_2 > commits_before_2) + }) + .expect("Timed out waiting for block commits"); while miner_1_tenures < min_miner_1_tenures || miner_2_tenures < min_miner_2_tenures { if btc_blocks_mined >= max_nakamoto_tenures { @@ -3891,8 +4396,6 @@ fn partial_tenure_fork() { .nakamoto_blocks_proposed .load(Ordering::SeqCst); - sleep_ms(1000); - info!( "Next tenure checking"; "fork_initiated?" => fork_initiated, @@ -3906,6 +4409,14 @@ fn partial_tenure_fork() { "mined_before_2" => mined_before_2, ); + // Pause block commits + rl1_skip_commit_op.set(true); + rl2_skip_commit_op.set(true); + + let tip_before = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let commits_before_1 = commits_1.load(Ordering::SeqCst); + let commits_before_2 = commits_2.load(Ordering::SeqCst); + next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 60, @@ -3919,36 +4430,50 @@ fn partial_tenure_fork() { || mined_2 > mined_before_2) }, ) - .unwrap_or_else(|_| { - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_1 = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); - error!( - "Next tenure failed to tick"; - "fork_initiated?" 
=> fork_initiated, - "miner_1_tenures" => miner_1_tenures, - "miner_2_tenures" => miner_2_tenures, - "min_miner_1_tenures" => min_miner_2_tenures, - "min_miner_2_tenures" => min_miner_2_tenures, - "proposed_before_1" => proposed_before_1, - "proposed_before_2" => proposed_before_2, - "mined_before_1" => mined_before_1, - "mined_before_2" => mined_before_2, - "mined_1" => mined_1, - "mined_2" => mined_2, - "proposed_1" => proposed_1, - "proposed_2" => proposed_2, - ); - panic!(); - }); + .expect("Timed out waiting for tenure change Stacks block"); btc_blocks_mined += 1; - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let miner = if mined_1 > mined_before_1 { 1 } else { 2 }; + // Unpause block commits + info!("Unpausing block commits"); + rl1_skip_commit_op.set(false); + rl2_skip_commit_op.set(false); + + // Wait for the block to be processed and the block commits to be submitted + wait_for(60, || { + let tip_after = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + // Ensure that both block commits have been sent before continuing + let commits_after_1 = commits_1.load(Ordering::SeqCst); + let commits_after_2 = commits_2.load(Ordering::SeqCst); + Ok(commits_after_1 > commits_before_1 + && commits_after_2 > commits_before_2 + && tip_after.consensus_hash != tip_before.consensus_hash) + }) + .expect("Sortition DB tip did not change"); + + let tip_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + info!("tip_after: {:?}", tip_sn); + let miner = match tip_sn.miner_pk_hash { + Some(pk_hash) => { + if pk_hash == mining_pkh_1 { + 1 + } else { + 2 + } + } + None => { + panic!("No sortition found"); + } + }; + info!("Next tenure mined by miner {miner}"); + + if let Some(last_sortition_winner) = last_sortition_winner { + if last_sortition_winner == miner && miner == 2 { + miner_2_won_2_in_a_row = true; + } else { + miner_2_won_2_in_a_row = false; + } + } + last_sortition_winner = Some(miner); if miner == 1 && miner_1_tenures == 0 { // Setup miner 2 to ignore a block in this tenure @@ -4009,34 +4534,11 @@ fn partial_tenure_fork() { Ok((fork_initiated && proposed_2 > proposed_before_2) || mined_1 > mined_before_1 - || mined_2 > mined_before_2) + || mined_2 > mined_before_2 + // Special case where neither miner can mine a block: + || (fork_initiated && miner_2_won_2_in_a_row)) }) - .unwrap_or_else(|_| { - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_2 = blocks_mined2.load(Ordering::SeqCst); - let proposed_1 = signer_test - .running_nodes - .nakamoto_blocks_proposed - .load(Ordering::SeqCst); - let proposed_2 = blocks_proposed2.load(Ordering::SeqCst); - error!( - "Next tenure failed to tick"; - "fork_initiated?" 
=> fork_initiated, - "miner_1_tenures" => miner_1_tenures, - "miner_2_tenures" => miner_2_tenures, - "min_miner_1_tenures" => min_miner_2_tenures, - "min_miner_2_tenures" => min_miner_2_tenures, - "proposed_before_1" => proposed_before_1, - "proposed_before_2" => proposed_before_2, - "mined_before_1" => mined_before_1, - "mined_before_2" => mined_before_2, - "mined_1" => mined_1, - "mined_2" => mined_2, - "proposed_1" => proposed_1, - "proposed_2" => proposed_2, - ); - panic!(); - }); + .expect("Timed out waiting for interim block to be mined"); } Err(e) => { if e.to_string().contains("TooMuchChaining") { @@ -4044,14 +4546,11 @@ fn partial_tenure_fork() { blocks = interim_block_ix; break; } else { - panic!("Failed to submit tx: {}", e); + panic!("Failed to submit tx: {e}"); } } } - info!( - "Attempted to mine interim block {}:{}", - btc_blocks_mined, interim_block_ix - ); + info!("Attempted to mine interim block {btc_blocks_mined}:{interim_block_ix}"); } if miner == 1 { @@ -4071,13 +4570,11 @@ fn partial_tenure_fork() { if miner == 1 { assert_eq!(mined_1, mined_before_1 + blocks + 1); + } else if miner_2_tenures < min_miner_2_tenures { + assert_eq!(mined_2, mined_before_2 + blocks + 1); } else { - if miner_2_tenures < min_miner_2_tenures { - assert_eq!(mined_2, mined_before_2 + blocks + 1); - } else { - // Miner 2 should have mined 0 blocks after the fork - assert_eq!(mined_2, mined_before_2); - } + // Miner 2 should have mined 0 blocks after the fork + assert_eq!(mined_2, mined_before_2); } } @@ -4097,10 +4594,7 @@ fn partial_tenure_fork() { // Must be at least the number of blocks mined by miner 1 and the number of blocks mined by miner 2 // before the fork was initiated assert!(peer_1_height >= pre_nakamoto_peer_1_height + miner_1_blocks + min_miner_2_blocks); - assert_eq!( - btc_blocks_mined, - u64::try_from(miner_1_tenures + miner_2_tenures).unwrap() - ); + assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); let sortdb = SortitionDB::open( &conf_node_2.get_burn_db_file_path(), @@ -4166,7 +4660,7 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { let short_timeout_secs = 20; let mut signer_test: SignerTest = SignerTest::new( num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], ); let all_signers: Vec<_> = signer_test @@ -4233,6 +4727,8 @@ fn locally_accepted_blocks_overriden_by_global_rejection() { .unwrap() .replace(rejecting_signers.clone()); test_observer::clear(); + // Make a new stacks transaction to create a different block signature, but make sure to propose it + // AFTER the signers are unfrozen so they don't inadvertently prevent the new block being accepted let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, @@ -4351,7 +4847,7 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let mut signer_test: SignerTest = SignerTest::new( num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], ); let all_signers: Vec<_> = signer_test @@ -4541,8 +5037,8 @@ fn locally_rejected_blocks_overriden_by_global_acceptance() { #[test] #[ignore] -/// Test that signers that have accept a locally signed block N+1 built in tenure A can sign a block proposed during a -/// new tenure B built upon the last globally accepted block N, i.e. a reorg can occur at a tenure boundary. 
+/// Test that signers that have accepted a locally signed block N+1 built in tenure A can sign a block proposed during a +/// new tenure B built upon the last globally accepted block N if the timeout is exceeded, i.e. a reorg can occur at a tenure boundary. /// /// Test Setup: /// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. @@ -4572,9 +5068,18 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { let send_fee = 180; let nmb_txs = 2; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest = SignerTest::new( + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + |config| { + // Just accept all reorg attempts + config.tenure_last_block_proposal_timeout = Duration::from_secs(0); + }, + |config| { + config.miner.block_commit_delay = Duration::from_secs(0); + }, + None, + None, ); let all_signers = signer_test .signer_stacks_private_keys @@ -4634,6 +5139,11 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .cloned() .take(num_signers * 7 / 10) .collect(); + let non_ignoring_signers: Vec<_> = all_signers + .iter() + .cloned() + .skip(num_signers * 7 / 10) + .collect(); TEST_IGNORE_ALL_BLOCK_PROPOSALS .lock() .unwrap() @@ -4641,6 +5151,12 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { // Clear the stackerdb chunks test_observer::clear(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 let transfer_tx = make_stacks_transfer( &sender_sk, @@ -4653,13 +5169,8 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to attempt to mine block N+1"); - let blocks_before = mined_blocks.load(Ordering::SeqCst); - let info_before = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info"); wait_for(short_timeout, || { - let ignored_signers = test_observer::get_stackerdb_chunks() + let accepted_signers = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) .filter_map(|chunk| { @@ -4667,7 +5178,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { .expect("Failed to deserialize SignerMessage"); match message { SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { - ignoring_signers.iter().find(|key| { + non_ignoring_signers.iter().find(|key| { key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) .is_ok() }) @@ -4676,7 +5187,7 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { } }) .collect::>(); - Ok(ignored_signers.len() + ignoring_signers.len() == num_signers) + Ok(accepted_signers.len() + ignoring_signers.len() == num_signers) }) .expect("FAIL: Timed out waiting for block proposal acceptance"); @@ -4753,22 +5264,22 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() { #[test] #[ignore] -/// Test that when 70% of signers accept a block, mark it globally accepted, but a miner ends its tenure -/// before it receives these signatures, the miner can recover in the following tenure. 
+/// Test that signers that have accepted a locally signed block N+1 built in tenure A cannot sign a block proposed during a +/// new tenure B built upon the last globally accepted block N if the timeout is not exceeded, i.e. a reorg cannot occur at a tenure boundary +/// before the specified timeout has been exceeded. /// /// Test Setup: /// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. /// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. /// /// Test Execution: -/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but >70% accept it. -/// The signers delay broadcasting the block and the miner ends its tenure before it receives these signatures. The -/// miner will propose an invalid block N+1' which all signers reject. The broadcast delay is removed and the miner -/// proposes a new block N+2 which all signers accept. +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but <30% accept it. The remaining signers +/// do not make a decision on the block. A new tenure begins and the miner proposes a new block N+1' which all signers reject as the timeout +/// has not been exceeded. /// /// Test Assertion: -/// Stacks tip advances to N+2 -fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { +/// Stacks tip remains at N. +fn reorg_locally_accepted_blocks_across_tenures_fails() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -4784,36 +5295,34 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let nmb_txs = 3; + let nmb_txs = 2; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new( + let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications( num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * nmb_txs)], + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + |config| { + // Essentially, do not allow any reorg attempts + config.tenure_last_block_proposal_timeout = Duration::from_secs(100_000); + }, + |_| {}, + None, + None, ); + let all_signers = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect::<Vec<_>>(); let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let short_timeout = Duration::from_secs(30); + let short_timeout = 30; signer_test.boot_to_epoch_3(); - info!("------------------------- Starting Tenure A -------------------------"); info!("------------------------- Test Mine Nakamoto Block N -------------------------"); let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - let start_time = Instant::now(); - - // wait until we get a sortition.
- // we might miss a block-commit at the start of epoch 3 - let burnchain = signer_test.running_nodes.conf.get_burnchain(); - let sortdb = burnchain.open_sortition_db(true).unwrap(); - - wait_for(30, || { - let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - Ok(tip.sortition) - }) - .expect("Timed out waiting for sortition"); // submit a tx so that the miner will mine a stacks block let mut sender_nonce = 0; @@ -4826,18 +5335,18 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { send_amt, ); let tx = submit_tx(&http_origin, &transfer_tx); + sender_nonce += 1; info!("Submitted tx {tx} in to mine block N"); + wait_for(short_timeout, || { + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + Ok(info_after.stacks_tip_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); - // a tenure has begun, so wait until we mine a block - while mined_blocks.load(Ordering::SeqCst) <= blocks_before { - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } - - sender_nonce += 1; + // Ensure that the block was accepted globally so the stacks tip has advanced to N let info_after = signer_test .stacks_client .get_peer_info() @@ -4846,26 +5355,35 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info_before.stacks_tip_height + 1, info_after.stacks_tip_height ); - let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); let block_n = nakamoto_blocks.last().unwrap(); assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); - // Propose a valid block, but force the miner to ignore the returned signatures and delay the block being - // broadcasted to the miner so it can end its tenure before block confirmation obtained + // Make more than 70% of the signers ignore the block proposal to ensure it is not globally accepted/rejected + let ignoring_signers: Vec<_> = all_signers + .iter() + .cloned() + .take(num_signers * 7 / 10) + .collect(); + let non_ignoring_signers: Vec<_> = all_signers + .iter() + .cloned() + .skip(num_signers * 7 / 10) + .collect(); + TEST_IGNORE_ALL_BLOCK_PROPOSALS + .lock() + .unwrap() + .replace(ignoring_signers.clone()); // Clear the stackerdb chunks - info!("Forcing miner to ignore block responses for block N+1"); - TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); - info!("Delaying signer block N+1 broadcasting to the miner"); - TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true); test_observer::clear(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); let info_before = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info"); - + // submit a tx so that the miner will ATTEMPT to mine a stacks block N+1 let transfer_tx = make_stacks_transfer( &sender_sk, sender_nonce, @@ -4874,66 +5392,273 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { &recipient, send_amt, ); - sender_nonce += 1; - let tx = submit_tx(&http_origin, &transfer_tx); info!("Submitted tx {tx} in to attempt to mine block N+1"); - let start_time = Instant::now(); - let mut block = None; - loop { - if block.is_none() { - block = test_observer::get_stackerdb_chunks() - .into_iter() - .flat_map(|chunk| chunk.modified_slots) - .find_map(|chunk| { - let message =
SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - .expect("Failed to deserialize SignerMessage"); - match message { - SignerMessage::BlockProposal(proposal) => { - if proposal.block.header.consensus_hash - == info_before.stacks_tip_consensus_hash - { - Some(proposal.block) - } else { - None - } - } - _ => None, - } - }); - } - if let Some(block) = &block { - let signatures = test_observer::get_stackerdb_chunks() + wait_for(short_timeout, || { + let accepted_signers = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + non_ignoring_signers.iter().find(|key| { + key.verify(accepted.signer_signature_hash.bits(), &accepted.signature) + .is_ok() + }) + } + _ => None, + } + }) + .collect::<Vec<_>>(); + Ok(accepted_signers.len() + ignoring_signers.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal acceptance"); + + let blocks_after = mined_blocks.load(Ordering::SeqCst); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_after, blocks_before); + assert_eq!(info_after, info_before); + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1 + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1 = nakamoto_blocks.last().unwrap(); + assert_ne!(block_n_1, block_n); + assert_ne!(info_after.stacks_tip.to_string(), block_n_1.block_hash); + + info!("------------------------- Starting Tenure B -------------------------"); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + // Clear the test observer so any old rejections are not counted + test_observer::clear(); + + // Start a new tenure and ensure that we see the expected rejections + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || { + let rejected_signers = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) .filter_map(|chunk| { let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) .expect("Failed to deserialize SignerMessage"); match message { - SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { - if block.header.signer_signature_hash() - == accepted.signer_signature_hash - { - Some(accepted.signature) - } else { - None - } - } + SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection { + signature, + signer_signature_hash, + ..
+ })) => non_ignoring_signers.iter().find(|key| { + key.verify(signer_signature_hash.bits(), &signature).is_ok() + }), _ => None, } }) .collect::>(); - if signatures.len() == num_signers { - break; - } - } - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for signers signatures for first block proposal", - ); - sleep_ms(1000); + Ok(rejected_signers.len() + ignoring_signers.len() == num_signers) + }, + ) + .expect("FAIL: Timed out waiting for block proposal rejections"); + + let blocks_after = mined_blocks.load(Ordering::SeqCst); + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!(blocks_after, blocks_before); + assert_eq!(info_after.stacks_tip, info_before.stacks_tip); + // Ensure that the block was NOT accepted globally so the stacks tip has NOT advanced to N+1' + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n_1_prime = nakamoto_blocks.last().unwrap(); + assert_ne!(block_n_1, block_n_1_prime); + assert_ne!( + info_after.stacks_tip.to_string(), + block_n_1_prime.block_hash + ); +} + +#[test] +#[ignore] +/// Test that when 70% of signers accept a block, mark it globally accepted, but a miner ends its tenure +/// before it receives these signatures, the miner can recover in the following tenure. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// The node mines 1 stacks block N (all signers sign it). The subsequent block N+1 is proposed, but >70% accept it. +/// The signers delay broadcasting the block and the miner ends its tenure before it receives these signatures. The +/// miner will propose an invalid block N+1' which all signers reject. The broadcast delay is removed and the miner +/// proposes a new block N+2 which all signers accept. +/// +/// Test Assertion: +/// Stacks tip advances to N+2 +fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let nmb_txs = 3; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let mut signer_test: SignerTest = SignerTest::new( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * nmb_txs)], + ); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + signer_test.boot_to_epoch_3(); + + info!("------------------------- Starting Tenure A -------------------------"); + info!("------------------------- Test Mine Nakamoto Block N -------------------------"); + + // wait until we get a sortition. 
+ // we might miss a block-commit at the start of epoch 3 + let burnchain = signer_test.running_nodes.conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + + wait_for(30, || { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + Ok(tip.sortition) + }) + .expect("Timed out waiting for sortition"); + + let mined_blocks = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + // submit a tx so that the miner will mine a stacks block + let mut sender_nonce = 0; + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let tx = submit_tx(&http_origin, &transfer_tx); + info!("Submitted tx {tx} in to mine block N"); + + // a tenure has begun, so wait until we mine a block + wait_for(30, || { + let new_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok(mined_blocks.load(Ordering::SeqCst) > blocks_before + && new_height > info_before.stacks_tip_height) + }) + .expect("Timed out waiting for block to be mined and processed"); + + sender_nonce += 1; + let info_after = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + assert_eq!( + info_before.stacks_tip_height + 1, + info_after.stacks_tip_height + ); + + let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); + let block_n = nakamoto_blocks.last().unwrap(); + assert_eq!(info_after.stacks_tip.to_string(), block_n.block_hash); + + info!("------------------------- Attempt to Mine Nakamoto Block N+1 -------------------------"); + // Propose a valid block, but force the miner to ignore the returned signatures and delay the block being + // broadcasted to the miner so it can end its tenure before block confirmation obtained + // Clear the stackerdb chunks + info!("Forcing miner to ignore block responses for block N+1"); + TEST_IGNORE_SIGNERS.lock().unwrap().replace(true); + info!("Delaying signer block N+1 broadcasting to the miner"); + TEST_PAUSE_BLOCK_BROADCAST.lock().unwrap().replace(true); + test_observer::clear(); + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let info_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info"); + + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + sender_nonce += 1; + + let tx = submit_tx(&http_origin, &transfer_tx); + + info!("Submitted tx {tx} in to attempt to mine block N+1"); + let mut block = None; + wait_for(30, || { + block = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .find_map(|chunk| { + let message = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockProposal(proposal) => { + if proposal.block.header.consensus_hash + == info_before.stacks_tip_consensus_hash + { + Some(proposal.block) + } else { + None + } + } + _ => None, + } + }); + let Some(block) = &block else { + return Ok(false); + }; + let signatures = test_observer::get_stackerdb_chunks() + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter_map(|chunk| { + let message = 
SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + .expect("Failed to deserialize SignerMessage"); + match message { + SignerMessage::BlockResponse(BlockResponse::Accepted(accepted)) => { + if block.header.signer_signature_hash() == accepted.signer_signature_hash { + Some(accepted.signature) + } else { + None + } + } + _ => None, + } + }) + .collect::<Vec<_>>(); + Ok(signatures.len() == num_signers) + }) + .expect("Test timed out while waiting for signers' signatures for first block proposal"); let block = block.unwrap(); let blocks_after = mined_blocks.load(Ordering::SeqCst); @@ -4966,9 +5691,8 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { "------------------------- Attempt to Mine Nakamoto Block N+1' -------------------------" ); // Wait for the miner to propose a new invalid block N+1' - let start_time = Instant::now(); let mut rejected_block = None; - while rejected_block.is_none() { + wait_for(30, || { rejected_block = test_observer::get_stackerdb_chunks() .into_iter() .flat_map(|chunk| chunk.modified_slots) .find_map(|chunk| { @@ -4989,11 +5713,9 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { _ => None, } }); - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for N+1' block proposal", - ); - } + Ok(rejected_block.is_some()) + }) + .expect("Timed out waiting for the N+1' block proposal"); info!("Allowing miner to accept block responses again. "); TEST_IGNORE_SIGNERS.lock().unwrap().replace(false); @@ -5002,7 +5724,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { // Assert the N+1' block was rejected let rejected_block = rejected_block.unwrap(); - loop { + wait_for(30, || { let stackerdb_events = test_observer::get_stackerdb_chunks(); let block_rejections = stackerdb_events .into_iter() @@ -5024,14 +5746,9 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { } }) .collect::<Vec<_>>(); - if block_rejections.len() == num_signers { - break; - } - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block proposal rejections", - ); - } + Ok(block_rejections.len() == num_signers) + }) + .expect("FAIL: Timed out waiting for block proposal rejections"); // Induce block N+2 to get mined let transfer_tx = make_stacks_transfer( @@ -5047,7 +5764,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { info!("Submitted tx {tx} in to attempt to mine block N+2"); info!("------------------------- Asserting that both N+1 and N+2 are accepted -------------------------"); - loop { + wait_for(30, || { // N.B.
have to use /v2/info because mined_blocks only increments if the miner's signing // coordinator returns successfully (meaning, mined_blocks won't increment for block N+1) let info = signer_test @@ -5055,16 +5772,9 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .get_peer_info() .expect("Failed to get peer info"); - if info_before.stacks_tip_height + 2 <= info.stacks_tip_height { - break; - } - - assert!( - start_time.elapsed() < short_timeout, - "FAIL: Test timed out while waiting for block production", - ); - thread::sleep(Duration::from_secs(1)); - } + Ok(info_before.stacks_tip_height + 2 <= info.stacks_tip_height) + }) + .expect("Timed out waiting for blocks to be mined"); let info_after = signer_test .stacks_client @@ -5083,7 +5793,7 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { .expect("Not a Nakamoto block") .signer_signature .len(); - assert_eq!(nmb_signatures, num_signers); + assert!(nmb_signatures >= num_signers * 7 / 10); // Ensure that the block was accepted globally so the stacks tip has advanced to N+2 let nakamoto_blocks = test_observer::get_mined_nakamoto_blocks(); @@ -5092,253 +5802,42 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { assert_ne!(block_n_2, block_n); } +/// Test a scenario where: +/// Two miners boot to Nakamoto. +/// Miner 1 wins the first tenure and proposes a block N with a TenureChangePayload +/// Signers accept and the stacks tip advances to N +/// Miner 2 wins the second tenure B but its proposed blocks are rejected by the signers. +/// Mine 2 empty burn blocks (simulate fast blocks scenario) +/// Miner 2 proposes block N+1 with a TenureChangePayload +/// Signers accept and the stacks tip advances to N+1 +/// Miner 2 proposes block N+2 with a TokenTransfer +/// Signers accept and the stacks tip advances to N+2 +/// Mine an empty burn block +/// Miner 2 proposes block N+3 with a TenureExtend +/// Signers accept and the chain advances to N+3 +/// Miner 1 wins the next tenure and proposes a block N+4 with a TenureChangePayload +/// Signers accept and the chain advances to N+4 +/// Asserts: +/// - Block N+1 contains the TenureChangePayload +/// - Block N+2 contains the TokenTransfer +/// - Block N+3 contains the TenureExtend +/// - Block N+4 contains the TenureChangePayload +/// - The stacks tip advances to N+4 #[test] #[ignore] -/// Test that we can mine a tenure extend and then continue mining afterwards. 
-fn continue_after_tenure_extend() { +fn continue_after_fast_block_no_sortition() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); let num_signers = 5; - let sender_sk = Secp256k1PrivateKey::new(); - let sender_addr = tests::to_addr(&sender_sk); let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let send_amt = 100; - let send_fee = 180; - let mut signer_test: SignerTest = SignerTest::new( - num_signers, - vec![(sender_addr.clone(), (send_amt + send_fee) * 5)], - ); - let timeout = Duration::from_secs(200); - let coord_channel = signer_test.running_nodes.coord_channel.clone(); - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - - signer_test.boot_to_epoch_3(); - - info!("------------------------- Mine Normal Tenure -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers); - - info!("------------------------- Extend Tenure -------------------------"); - signer_test - .running_nodes - .nakamoto_test_skip_commit_op - .0 - .lock() - .unwrap() - .replace(true); - - // It's possible that we have a pending block commit already. - // Mine two BTC blocks to "flush" this commit. - let burn_height = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info") - .burn_block_height; - for i in 0..2 { - info!( - "------------- After pausing commits, triggering 2 BTC blocks: ({} of 2) -----------", - i + 1 - ); - - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(1); - - wait_for(60, || { - let blocks_processed_after = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - Ok(blocks_processed_after > blocks_processed_before) - }) - .expect("Timed out waiting for tenure extend block"); - } - - wait_for(30, || { - let new_burn_height = signer_test - .stacks_client - .get_peer_info() - .expect("Failed to get peer info") - .burn_block_height; - Ok(new_burn_height == burn_height + 2) - }) - .expect("Timed out waiting for burnchain to advance"); - - // The last block should have a single instruction in it, the tenure extend - let blocks = test_observer::get_blocks(); - let last_block = blocks.last().unwrap(); - let transactions = last_block["transactions"].as_array().unwrap(); - let tx = transactions.first().expect("No transactions in block"); - let raw_tx = tx["raw_tx"].as_str().unwrap(); - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - match &parsed.payload { - TransactionPayload::TenureChange(payload) - if payload.cause == TenureChangeCause::Extended => {} - _ => panic!("Expected tenure extend transaction, got {:?}", parsed), - }; - - // Verify that the miner can continue mining in the tenure with the tenure extend - info!("------------------------- Mine After Tenure Extend -------------------------"); - let mut sender_nonce = 0; - let mut blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - for _ in 0..5 { - // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - 
signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - sender_nonce += 1; - submit_tx(&http_origin, &transfer_tx); - - info!("Submitted transfer tx and waiting for block proposal"); - wait_for(30, || { - let blocks_processed_after = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - Ok(blocks_processed_after > blocks_processed_before) - }) - .expect("Timed out waiting for block proposal"); - blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - info!("Block {blocks_processed_before} processed, continuing"); - } - - signer_test.shutdown(); -} - -#[test] -#[ignore] -/// Test that signers can successfully sign a block proposal in the 0th tenure of a reward cycle -/// This ensures there is no race condition in the /v2/pox endpoint which could prevent it from updating -/// on time, possibly triggering an "off by one" like behaviour in the 0th tenure. -/// -fn signing_in_0th_tenure_of_reward_cycle() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); - let signer_public_keys = signer_test - .signer_stacks_private_keys - .iter() - .map(StacksPublicKey::from_private) - .collect::>(); - let long_timeout = Duration::from_secs(200); - signer_test.boot_to_epoch_3(); - let curr_reward_cycle = signer_test.get_current_reward_cycle(); - let next_reward_cycle = curr_reward_cycle + 1; - // Mine until the boundary of the first full Nakamoto reward cycles (epoch 3 starts in the middle of one) - let next_reward_cycle_height_boundary = signer_test - .running_nodes - .btc_regtest_controller - .get_burnchain() - .reward_cycle_to_block_height(next_reward_cycle) - .saturating_sub(1); - - info!("------------------------- Advancing to {next_reward_cycle} Boundary at Block {next_reward_cycle_height_boundary} -------------------------"); - signer_test.run_until_burnchain_height_nakamoto( - long_timeout, - next_reward_cycle_height_boundary, - num_signers, - ); - - let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); - let get_v3_signer = |pubkey: &Secp256k1PublicKey, reward_cycle: u64| { - let url = &format!( - "{http_origin}/v3/signer/{pk}/{reward_cycle}", - pk = pubkey.to_hex() - ); - info!("Send request: GET {url}"); - reqwest::blocking::get(url) - .unwrap_or_else(|e| panic!("GET request failed: {e}")) - .json::() - .unwrap() - .blocks_signed - }; - - assert_eq!(signer_test.get_current_reward_cycle(), curr_reward_cycle); - - for signer in &signer_public_keys { - let blocks_signed = get_v3_signer(&signer, next_reward_cycle); - assert_eq!(blocks_signed, 0); - } - - info!("------------------------- Enter Reward Cycle {next_reward_cycle} -------------------------"); - for signer in &signer_public_keys { - let blocks_signed = get_v3_signer(&signer, next_reward_cycle); - assert_eq!(blocks_signed, 0); - } - let blocks_before = signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst); - signer_test - .running_nodes - .btc_regtest_controller - .build_next_block(1); - - wait_for(30, || { - Ok(signer_test - .running_nodes - .nakamoto_blocks_mined - .load(Ordering::SeqCst) - > blocks_before) - }) - .unwrap(); - - for signer in &signer_public_keys { - let 
blocks_signed = get_v3_signer(&signer, next_reward_cycle); - assert_eq!(blocks_signed, 1); - } - assert_eq!(signer_test.get_current_reward_cycle(), next_reward_cycle); -} - -/// This test involves two miners with a custom chain id, each mining tenures with 6 blocks each. -/// Half of the signers are attached to each miner, so the test also verifies that -/// the signers' messages successfully make their way to the active miner. -#[test] -#[ignore] -fn multiple_miners_with_custom_chain_id() { - let num_signers = 5; - let max_nakamoto_tenures = 20; - let inter_blocks_per_tenure = 5; - - // setup sender + recipient for a test stx transfer let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); - let send_amt = 1000; + let send_amt = 100; let send_fee = 180; - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let num_txs = 1; + let sender_nonce = 0; let btc_miner_1_seed = vec![1, 1, 1, 1]; let btc_miner_2_seed = vec![2, 2, 2, 2]; @@ -5354,15 +5853,16 @@ fn multiple_miners_with_custom_chain_id() { let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); let mut node_2_listeners = Vec::new(); - let chain_id = 0x87654321; + + let max_nakamoto_tenures = 30; + + info!("------------------------- Test Setup -------------------------"); // partition the signer set so that ~half are listening and using node 1 for RPC and events, // and the rest are using node 2 + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, - vec![( - sender_addr.clone(), - (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure, - )], + vec![(sender_addr, (send_amt + send_fee) * num_txs)], |signer_config| { let node_host = if signer_config.endpoint.port() % 2 == 0 { &node_1_rpc_bind @@ -5370,7 +5870,6 @@ fn multiple_miners_with_custom_chain_id() { &node_2_rpc_bind }; signer_config.node_host = node_host.to_string(); - signer_config.chain_id = Some(chain_id) }, |config| { config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); @@ -5379,7 +5878,7 @@ fn multiple_miners_with_custom_chain_id() { config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); config.miner.wait_on_interim_blocks = Duration::from_secs(5); config.node.pox_sync_sample_secs = 30; - config.burnchain.chain_id = chain_id; + config.burnchain.pox_reward_length = Some(max_nakamoto_tenures); config.node.seed = btc_miner_1_seed.clone(); config.node.local_peer_seed = btc_miner_1_seed.clone(); @@ -5401,11 +5900,9 @@ fn multiple_miners_with_custom_chain_id() { false }) }, - Some(vec![btc_miner_1_pk.clone(), btc_miner_2_pk.clone()]), + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), None, ); - let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); - let conf = signer_test.running_nodes.conf.clone(); let mut conf_node_2 = conf.clone(); conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}"); @@ -5419,36 +5916,61 @@ fn multiple_miners_with_custom_chain_id() { conf_node_2.node.miner = true; conf_node_2.events_observers.clear(); conf_node_2.events_observers.extend(node_2_listeners); - assert!(!conf_node_2.events_observers.is_empty()); let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( 
format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), conf.burnchain.chain_id, conf.burnchain.peer_version, ); - - let http_origin = format!("http://{}", &conf.node.rpc_bind); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); let run_loop_stopper_2 = run_loop_2.get_termination_switch(); let rl2_coord_channels = run_loop_2.coordinator_channels(); let Counters { naka_submitted_commits: rl2_commits, + naka_skip_commit_op: rl2_skip_commit_op, naka_mined_blocks: blocks_mined2, .. } = run_loop_2.counters(); - let run_loop_2_thread = thread::Builder::new() - .name("run_loop_2".into()) - .spawn(move || run_loop_2.start(None, 0)) - .unwrap(); - - signer_test.boot_to_epoch_3(); + + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone(); + + // Some helper functions for verifying the blocks contain their expected transactions + let verify_last_block_contains_transfer_tx = || { + let blocks = test_observer::get_blocks(); + let last_block = &blocks.last().unwrap(); + let transactions = last_block["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + assert!( + matches!(parsed.payload, TransactionPayload::TokenTransfer(_, _, _)), + "Expected token transfer transaction, got {parsed:?}" + ); + }; + + info!("------------------------- Pause Miner 2's Block Commits -------------------------"); + + // Make sure Miner 2 cannot win a sortition at first. 
+    rl2_skip_commit_op.set(true);
+
+    info!("------------------------- Boot to Epoch 3.0 -------------------------");
+
+    let run_loop_2_thread = thread::Builder::new()
+        .name("run_loop_2".into())
+        .spawn(move || run_loop_2.start(None, 0))
+        .unwrap();
+
+    signer_test.boot_to_epoch_3();
     wait_for(120, || {
         let Some(node_1_info) = get_chain_info_opt(&conf) else {
@@ -5459,143 +5981,2574 @@ fn multiple_miners_with_custom_chain_id() {
         };
         Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height)
     })
-    .expect("Timed out waiting for follower to catch up to the miner");
+    .expect("Timed out waiting for bootstrapped node to catch up to the miner");
-    let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height;
+    let mining_pkh_1 = Hash160::from_node_public_key(&StacksPublicKey::from_private(
+        &conf.miner.mining_key.unwrap(),
+    ));
+    let mining_pkh_2 = Hash160::from_node_public_key(&StacksPublicKey::from_private(
+        &conf_node_2.miner.mining_key.unwrap(),
+    ));
+    debug!("The mining key for miner 1 is {mining_pkh_1}");
+    debug!("The mining key for miner 2 is {mining_pkh_2}");
     info!("------------------------- Reached Epoch 3.0 -------------------------");
-    // due to the random nature of mining sortitions, the way this test is structured
-    // is that we keep track of how many tenures each miner produced, and once enough sortitions
-    // have been produced such that each miner has produced 3 tenures, we stop and check the
-    // results at the end
-    let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone();
-    let rl1_commits = signer_test.running_nodes.commits_submitted.clone();
+    let burnchain = signer_test.running_nodes.conf.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
-    let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap());
-    let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap());
-    let mut btc_blocks_mined = 1;
-    let mut miner_1_tenures = 0;
-    let mut miner_2_tenures = 0;
-    let mut sender_nonce = 0;
-    while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) {
-        if btc_blocks_mined > max_nakamoto_tenures {
-            panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting");
-        }
-        let blocks_processed_before =
-            blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst);
-        signer_test.mine_block_wait_on_processing(
-            &[&rl1_coord_channels, &rl2_coord_channels],
-            &[&rl1_commits, &rl2_commits],
-            Duration::from_secs(30),
-        );
-        btc_blocks_mined += 1;
+    let all_signers = signer_test
+        .signer_stacks_private_keys
+        .iter()
+        .map(StacksPublicKey::from_private)
+        .collect::<Vec<_>>();
+    let get_burn_height = || {
+        SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())
+            .unwrap()
+            .block_height
+    };
+    let starting_peer_height = get_chain_info(&conf).stacks_tip_height;
+    let starting_burn_height = get_burn_height();
+    let mut btc_blocks_mined = 0;
-        // wait for the new block to be processed
-        wait_for(60, || {
-            let blocks_processed =
-                blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst);
-            Ok(blocks_processed > blocks_processed_before)
-        })
-        .unwrap();
+    info!("------------------------- Pause Miner 1's Block Commit -------------------------");
+    // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block
+    signer_test
+        .running_nodes
+        .nakamoto_test_skip_commit_op
+        .set(true);
-        info!(
-            "Nakamoto blocks mined: {}",
blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst) - ); + info!("------------------------- Miner 1 Mines a Normal Tenure A -------------------------"); + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; - // mine the interim blocks - info!("Mining interim blocks"); - for interim_block_ix in 0..inter_blocks_per_tenure { - let blocks_processed_before = - blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); - // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - sender_nonce += 1; - submit_tx(&http_origin, &transfer_tx); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + btc_blocks_mined += 1; - wait_for(60, || { - let blocks_processed = - blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); - Ok(blocks_processed > blocks_processed_before) - }) - .unwrap(); - info!( - "Mined interim block {}:{}", - btc_blocks_mined, interim_block_ix - ); - } + // assure we have a successful sortition that miner A won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); - let blocks = get_nakamoto_headers(&conf); - let mut seen_burn_hashes = HashSet::new(); - miner_1_tenures = 0; - miner_2_tenures = 0; - for header in blocks.iter() { - if seen_burn_hashes.contains(&header.burn_header_hash) { - continue; - } - seen_burn_hashes.insert(header.burn_header_hash.clone()); + // wait for the new block to be processed + wait_for(60, || { + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + Ok( + blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1 + && stacks_height > stacks_height_before + && test_observer::get_blocks().len() > nmb_old_blocks, + ) + }) + .unwrap(); - let header = header.anchored_header.as_stacks_nakamoto().unwrap(); - if miner_1_pk - .verify( - header.miner_signature_hash().as_bytes(), - &header.miner_signature, - ) - .unwrap() - { - miner_1_tenures += 1; - } - if miner_2_pk - .verify( - header.miner_signature_hash().as_bytes(), - &header.miner_signature, + verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); + + info!("------------------------- Make Signers Reject All Subsequent Proposals -------------------------"); + + let stacks_height_before = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + + // Make all signers ignore block proposals + let ignoring_signers = all_signers.to_vec(); + TEST_REJECT_ALL_BLOCK_PROPOSAL + .lock() + .unwrap() + .replace(ignoring_signers.clone()); + + info!("------------------------- Submit Miner 2 Block Commit -------------------------"); + let rejections_before = signer_test + .running_nodes + .nakamoto_blocks_rejected + .load(Ordering::SeqCst); + + let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); + // Unpause miner 2's block commits + rl2_skip_commit_op.set(false); + + // Ensure the miner 2 submits a block commit before mining the bitcoin block + wait_for(30, || { + 
Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before) + }) + .unwrap(); + + // Make miner 2 also fail to submit any FURTHER block commits + rl2_skip_commit_op.set(true); + + let burn_height_before = get_burn_height(); + + info!("------------------------- Miner 2 Mines an Empty Tenure B -------------------------"; + "burn_height_before" => burn_height_before, + "rejections_before" => rejections_before, + ); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 60, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + btc_blocks_mined += 1; + + // assure we have a successful sortition that miner B won + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(tip.sortition); + assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2); + + info!("----- Waiting for block rejections -----"); + let min_rejections = num_signers * 4 / 10; + // Wait until we have some block rejections + wait_for(30, || { + std::thread::sleep(Duration::from_secs(1)); + let chunks = test_observer::get_stackerdb_chunks(); + let rejections: Vec<_> = chunks + .into_iter() + .flat_map(|chunk| chunk.modified_slots) + .filter(|chunk| { + let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) + else { + return false; + }; + matches!( + message, + SignerMessage::BlockResponse(BlockResponse::Rejected(_)) ) - .unwrap() - { - miner_2_tenures += 1; - } - } - info!( - "Miner 1 tenures: {}, Miner 2 tenures: {}", - miner_1_tenures, miner_2_tenures + }) + .collect(); + Ok(rejections.len() >= min_rejections) + }) + .expect("Timed out waiting for block rejections"); + + // Mine another couple burn blocks and ensure there is _no_ sortition + info!("------------------------- Mine Two Burn Block(s) with No Sortitions -------------------------"); + for _ in 0..2 { + let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst); + let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); + let burn_height_before = get_burn_height(); + let commits_before_1 = rl1_commits.load(Ordering::SeqCst); + let commits_before_2 = rl2_commits.load(Ordering::SeqCst); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(get_burn_height() > burn_height_before), + ) + .unwrap(); + btc_blocks_mined += 1; + + assert_eq!(rl1_commits.load(Ordering::SeqCst), commits_before_1); + assert_eq!(rl2_commits.load(Ordering::SeqCst), commits_before_2); + assert_eq!( + blocks_mined1.load(Ordering::SeqCst), + blocks_processed_before_1 + ); + assert_eq!( + blocks_mined2.load(Ordering::SeqCst), + blocks_processed_before_2 ); + + // assure we have NO sortition + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + assert!(!tip.sortition); } - info!( - "New chain info 1: {:?}", - get_chain_info(&signer_test.running_nodes.conf) + // Verify that no Stacks blocks have been mined (signers are ignoring) and no commits have been submitted by either miner + let stacks_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .stacks_tip_height; + assert_eq!(stacks_height, stacks_height_before); + let stacks_height_before = stacks_height; + + info!("------------------------- Enabling Signer Block Proposals -------------------------"; + "stacks_height" => stacks_height_before, ); - info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); + let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); + let nmb_old_blocks = test_observer::get_blocks().len(); 
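+    // The snapshots above let the wait further below detect miner 2's first block
+    // once proposals are re-enabled.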
+    // Allow signers to respond to proposals again
+    TEST_REJECT_ALL_BLOCK_PROPOSAL
+        .lock()
+        .unwrap()
+        .replace(Vec::new());
-    let peer_1_height = get_chain_info(&conf).stacks_tip_height;
-    let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height;
-    info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height);
-    assert_eq!(peer_1_height, peer_2_height);
-    assert_eq!(
-        peer_1_height,
-        pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1)
+    info!("------------------------- Wait for Miner B's Block N -------------------------");
+    // wait for the new block to be processed
+    wait_for(30, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for block to be mined and processed");
+
+    info!(
+        "------------------------- Verify Tenure Change Tx in Miner B's Block N -------------------------"
     );
-    assert_eq!(
-        btc_blocks_mined,
-        u64::try_from(miner_1_tenures + miner_2_tenures).unwrap()
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound);
+
+    info!("------------------------- Wait for Miner B's Block N+1 -------------------------");
+
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst);
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+
+    // submit a tx so that the miner will mine an extra block
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
     );
+    submit_tx(&http_origin, &transfer_tx);
-    // Verify both nodes have the correct chain id
-    let miner1_info = get_chain_info(&signer_test.running_nodes.conf);
-    assert_eq!(miner1_info.network_id, chain_id);
+    // wait for the new block to be processed
+    wait_for(30, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for block to be mined and processed");
-    let miner2_info = get_chain_info(&conf_node_2);
-    assert_eq!(miner2_info.network_id, chain_id);
+    info!("------------------------- Verify Miner B's Block N+1 -------------------------");
+
+    verify_last_block_contains_transfer_tx();
+
+    info!("------------------------- Mine An Empty Sortition -------------------------");
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || {
+            Ok(get_burn_height() > burn_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks)
+        },
+    )
+    .unwrap();
+    btc_blocks_mined += 1;
+
+    info!("------------------------- Verify Miner B Issues a Tenure Extend in Block N+2 -------------------------");
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended);
+
+    info!("------------------------- Unpause Miner A's Block Commits -------------------------");
+    let commits_before_1 = rl1_commits.load(Ordering::SeqCst);
+    signer_test
+        .running_nodes
+        .nakamoto_test_skip_commit_op
+        .set(false);
+    wait_for(30, || {
+        Ok(rl1_commits.load(Ordering::SeqCst) > commits_before_1)
+    })
+    .unwrap();
+
+    info!("------------------------- Run Miner A's Tenure -------------------------");
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let burn_height_before = get_burn_height();
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || {
+            Ok(get_burn_height() > burn_height_before
+                && blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && test_observer::get_blocks().len() > nmb_old_blocks)
+        },
+    )
+    .unwrap();
+    btc_blocks_mined += 1;
+
+    // assure we have a successful sortition that miner A won
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert!(tip.sortition);
+    assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1);
+
+    info!("------------------------- Verify Miner A Issued a Tenure Change in Block N+4 -------------------------");
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound);
+
+    info!(
+        "------------------------- Confirm Burn and Stacks Block Heights -------------------------"
+    );
+    let peer_info = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info");
+
+    assert_eq!(get_burn_height(), starting_burn_height + btc_blocks_mined);
+    assert_eq!(peer_info.stacks_tip_height, starting_peer_height + 5);
+
+    info!("------------------------- Shutdown -------------------------");
+    rl2_coord_channels
+        .lock()
+        .expect("Mutex poisoned")
+        .stop_chains_coordinator();
+    run_loop_stopper_2.store(false, Ordering::SeqCst);
+    run_loop_2_thread.join().unwrap();
+    signer_test.shutdown();
+}
+
+#[test]
+#[ignore]
+/// Test that we can mine a tenure extend and then continue mining afterwards.
+fn continue_after_tenure_extend() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let send_amt = 100;
+    let send_fee = 180;
+    let mut signer_test: SignerTest<SpawnedSigner> =
+        SignerTest::new(num_signers, vec![(sender_addr, (send_amt + send_fee) * 5)]);
+    let timeout = Duration::from_secs(200);
+    let coord_channel = signer_test.running_nodes.coord_channel.clone();
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+
+    signer_test.boot_to_epoch_3();
+
+    info!("------------------------- Mine Normal Tenure -------------------------");
+    signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers);
+
+    info!("------------------------- Extend Tenure -------------------------");
+    signer_test
+        .running_nodes
+        .nakamoto_test_skip_commit_op
+        .set(true);
+
+    // It's possible that we have a pending block commit already.
+    // Mine two BTC blocks to "flush" this commit.
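+    // Once any in-flight commit is consumed by the first sortition, the second burn
+    // block has no winning commit, so the miner should issue a tenure extend.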
+ let burn_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .burn_block_height; + for i in 0..2 { + info!( + "------------- After pausing commits, triggering 2 BTC blocks: ({} of 2) -----------", + i + 1 + ); + + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + signer_test + .running_nodes + .btc_regtest_controller + .build_next_block(1); + + wait_for(60, || { + let blocks_processed_after = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed_after > blocks_processed_before) + }) + .expect("Timed out waiting for tenure extend block"); + } + + wait_for(30, || { + let new_burn_height = signer_test + .stacks_client + .get_peer_info() + .expect("Failed to get peer info") + .burn_block_height; + Ok(new_burn_height == burn_height + 2) + }) + .expect("Timed out waiting for burnchain to advance"); + + // The last block should have a single instruction in it, the tenure extend + let blocks = test_observer::get_blocks(); + let last_block = blocks.last().unwrap(); + let transactions = last_block["transactions"].as_array().unwrap(); + let tx = transactions.first().expect("No transactions in block"); + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TenureChange(payload) + if payload.cause == TenureChangeCause::Extended => {} + _ => panic!("Expected tenure extend transaction, got {parsed:?}"), + }; + + // Verify that the miner can continue mining in the tenure with the tenure extend + info!("------------------------- Mine After Tenure Extend -------------------------"); + let mut blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + for sender_nonce in 0..5 { + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal"); + wait_for(30, || { + let blocks_processed_after = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed_after > blocks_processed_before) + }) + .expect("Timed out waiting for block proposal"); + blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + info!("Block {blocks_processed_before} processed, continuing"); + } + + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// Test that signers can successfully sign a block proposal in the 0th tenure of a reward cycle +/// This ensures there is no race condition in the /v2/pox endpoint which could prevent it from updating +/// on time, possibly triggering an "off by one" like behaviour in the 0th tenure. 
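+/// The test advances to the last burn block of the current reward cycle, mines the
+/// 0th-tenure block of the next cycle, and checks each signer's blocks_signed count
+/// via the /v3/signer endpoint.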
+///
+fn signing_in_0th_tenure_of_reward_cycle() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new(num_signers, vec![]);
+    let signer_public_keys = signer_test
+        .signer_stacks_private_keys
+        .iter()
+        .map(StacksPublicKey::from_private)
+        .collect::<Vec<_>>();
+    let long_timeout = Duration::from_secs(200);
+    signer_test.boot_to_epoch_3();
+    let curr_reward_cycle = signer_test.get_current_reward_cycle();
+    let next_reward_cycle = curr_reward_cycle + 1;
+    // Mine until the boundary of the first full Nakamoto reward cycles (epoch 3 starts in the middle of one)
+    let next_reward_cycle_height_boundary = signer_test
+        .running_nodes
+        .btc_regtest_controller
+        .get_burnchain()
+        .reward_cycle_to_block_height(next_reward_cycle)
+        .saturating_sub(1);
+
+    info!("------------------------- Advancing to {next_reward_cycle} Boundary at Block {next_reward_cycle_height_boundary} -------------------------");
+    signer_test.run_until_burnchain_height_nakamoto(
+        long_timeout,
+        next_reward_cycle_height_boundary,
+        num_signers,
+    );
+
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    let get_v3_signer = |pubkey: &Secp256k1PublicKey, reward_cycle: u64| {
+        let url = &format!(
+            "{http_origin}/v3/signer/{pk}/{reward_cycle}",
+            pk = pubkey.to_hex()
+        );
+        info!("Send request: GET {url}");
+        reqwest::blocking::get(url)
+            .unwrap_or_else(|e| panic!("GET request failed: {e}"))
+            .json::<GetSignerResponse>()
+            .unwrap()
+            .blocks_signed
+    };
+
+    assert_eq!(signer_test.get_current_reward_cycle(), curr_reward_cycle);
+
+    for signer in &signer_public_keys {
+        let blocks_signed = get_v3_signer(signer, next_reward_cycle);
+        assert_eq!(blocks_signed, 0);
+    }
+
+    info!("------------------------- Enter Reward Cycle {next_reward_cycle} -------------------------");
+    for signer in &signer_public_keys {
+        let blocks_signed = get_v3_signer(signer, next_reward_cycle);
+        assert_eq!(blocks_signed, 0);
+    }
+    let blocks_before = signer_test
+        .running_nodes
+        .nakamoto_blocks_mined
+        .load(Ordering::SeqCst);
+    signer_test
+        .running_nodes
+        .btc_regtest_controller
+        .build_next_block(1);
+
+    wait_for(30, || {
+        Ok(signer_test
+            .running_nodes
+            .nakamoto_blocks_mined
+            .load(Ordering::SeqCst)
+            > blocks_before)
+    })
+    .unwrap();
+
+    let block_mined = test_observer::get_mined_nakamoto_blocks()
+        .last()
+        .unwrap()
+        .clone();
+    // Must ensure that the signers that signed the block have their blocks_signed updated appropriately
+    for signature in &block_mined.signer_signature {
+        let signer = signer_public_keys
+            .iter()
+            .find(|pk| {
+                pk.verify(block_mined.signer_signature_hash.as_bytes(), signature)
+                    .unwrap()
+            })
+            .expect("Unknown signer signature");
+        let blocks_signed = get_v3_signer(signer, next_reward_cycle);
+        assert_eq!(blocks_signed, 1);
+    }
+    assert_eq!(signer_test.get_current_reward_cycle(), next_reward_cycle);
+}
+
+/// This test involves two miners with a custom chain id, each mining tenures with 6 blocks each.
+/// Half of the signers are attached to each miner, so the test also verifies that
+/// the signers' messages successfully make their way to the active miner.
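+/// Both nodes and all signers share the custom chain id 0x87654321; the test ends by
+/// asserting that each node reports that value as its network id.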
+#[test]
+#[ignore]
+fn multiple_miners_with_custom_chain_id() {
+    let num_signers = 5;
+    let max_nakamoto_tenures = 20;
+    let inter_blocks_per_tenure = 5;
+
+    // setup sender + recipient for a test stx transfer
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 1000;
+    let send_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+
+    let btc_miner_1_seed = vec![1, 1, 1, 1];
+    let btc_miner_2_seed = vec![2, 2, 2, 2];
+    let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key();
+    let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key();
+
+    let node_1_rpc = gen_random_port();
+    let node_1_p2p = gen_random_port();
+    let node_2_rpc = gen_random_port();
+    let node_2_p2p = gen_random_port();
+
+    let localhost = "127.0.0.1";
+    let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}");
+    let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}");
+    let mut node_2_listeners = Vec::new();
+    let chain_id = 0x87654321;
+    // partition the signer set so that ~half are listening and using node 1 for RPC and events,
+    // and the rest are using node 2
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![(
+            sender_addr,
+            (send_amt + send_fee) * max_nakamoto_tenures * inter_blocks_per_tenure,
+        )],
+        |signer_config| {
+            let node_host = if signer_config.endpoint.port() % 2 == 0 {
+                &node_1_rpc_bind
+            } else {
+                &node_2_rpc_bind
+            };
+            signer_config.node_host = node_host.to_string();
+            signer_config.chain_id = Some(chain_id)
+        },
+        |config| {
+            config.node.rpc_bind = format!("{localhost}:{node_1_rpc}");
+            config.node.p2p_bind = format!("{localhost}:{node_1_p2p}");
+            config.node.data_url = format!("http://{localhost}:{node_1_rpc}");
+            config.node.p2p_address = format!("{localhost}:{node_1_p2p}");
+            config.miner.wait_on_interim_blocks = Duration::from_secs(5);
+            config.node.pox_sync_sample_secs = 30;
+            config.burnchain.chain_id = chain_id;
+
+            config.node.seed = btc_miner_1_seed.clone();
+            config.node.local_peer_seed = btc_miner_1_seed.clone();
+            config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex());
+            config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1]));
+
+            config.events_observers.retain(|listener| {
+                let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else {
+                    warn!(
+                        "Cannot parse {} to a socket, assuming it isn't a signer-listener binding",
+                        listener.endpoint
+                    );
+                    return true;
+                };
+                if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT {
+                    return true;
+                }
+                node_2_listeners.push(listener.clone());
+                false
+            })
+        },
+        Some(vec![btc_miner_1_pk, btc_miner_2_pk]),
+        None,
+    );
+    let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone();
+
+    let conf = signer_test.running_nodes.conf.clone();
+    let mut conf_node_2 = conf.clone();
+    conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}");
+    conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}");
+    conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}");
+    conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}");
+    conf_node_2.node.seed = btc_miner_2_seed.clone();
+    conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex());
+    conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone();
+    conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2]));
+    conf_node_2.node.miner = true;
+
conf_node_2.events_observers.clear(); + conf_node_2.events_observers.extend(node_2_listeners); + + assert!(!conf_node_2.events_observers.is_empty()); + + let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); + let node_1_pk = StacksPublicKey::from_private(&node_1_sk); + + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); + + conf_node_2.node.set_bootstrap_nodes( + format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), + conf.burnchain.chain_id, + conf.burnchain.peer_version, + ); + + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); + let run_loop_stopper_2 = run_loop_2.get_termination_switch(); + let rl2_coord_channels = run_loop_2.coordinator_channels(); + let Counters { + naka_submitted_commits: rl2_commits, + naka_mined_blocks: blocks_mined2, + .. + } = run_loop_2.counters(); + let run_loop_2_thread = thread::Builder::new() + .name("run_loop_2".into()) + .spawn(move || run_loop_2.start(None, 0)) + .unwrap(); + + signer_test.boot_to_epoch_3(); + + wait_for(120, || { + let Some(node_1_info) = get_chain_info_opt(&conf) else { + return Ok(false); + }; + let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else { + return Ok(false); + }; + Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height) + }) + .expect("Timed out waiting for follower to catch up to the miner"); + + let pre_nakamoto_peer_1_height = get_chain_info(&conf).stacks_tip_height; + + info!("------------------------- Reached Epoch 3.0 -------------------------"); + + // due to the random nature of mining sortitions, the way this test is structured + // is that we keep track of how many tenures each miner produced, and once enough sortitions + // have been produced such that each miner has produced 3 tenures, we stop and check the + // results at the end + let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); + let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + + let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); + let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); + let mut btc_blocks_mined = 1; + let mut miner_1_tenures = 0; + let mut miner_2_tenures = 0; + let mut sender_nonce = 0; + while !(miner_1_tenures >= 3 && miner_2_tenures >= 3) { + if btc_blocks_mined > max_nakamoto_tenures { + panic!("Produced {btc_blocks_mined} sortitions, but didn't cover the test scenarios, aborting"); + } + let blocks_processed_before = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + signer_test.mine_block_wait_on_processing( + &[&rl1_coord_channels, &rl2_coord_channels], + &[&rl1_commits, &rl2_commits], + Duration::from_secs(30), + ); + btc_blocks_mined += 1; + + // wait for the new block to be processed + wait_for(60, || { + let blocks_processed = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + + info!( + "Nakamoto blocks mined: {}", + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst) + ); + + // mine the interim blocks + info!("Mining interim blocks"); + for interim_block_ix in 0..inter_blocks_per_tenure { + let blocks_processed_before = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + // submit a tx so that the miner will mine an extra block + let transfer_tx = 
make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + sender_nonce += 1; + submit_tx(&http_origin, &transfer_tx); + + wait_for(60, || { + let blocks_processed = + blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); + Ok(blocks_processed > blocks_processed_before) + }) + .unwrap(); + info!("Mined interim block {btc_blocks_mined}:{interim_block_ix}"); + } + + let blocks = get_nakamoto_headers(&conf); + let mut seen_burn_hashes = HashSet::new(); + miner_1_tenures = 0; + miner_2_tenures = 0; + for header in blocks.iter() { + if seen_burn_hashes.contains(&header.burn_header_hash) { + continue; + } + seen_burn_hashes.insert(header.burn_header_hash); + + let header = header.anchored_header.as_stacks_nakamoto().unwrap(); + if miner_1_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + { + miner_1_tenures += 1; + } + if miner_2_pk + .verify( + header.miner_signature_hash().as_bytes(), + &header.miner_signature, + ) + .unwrap() + { + miner_2_tenures += 1; + } + } + info!("Miner 1 tenures: {miner_1_tenures}, Miner 2 tenures: {miner_2_tenures}"); + } + + info!( + "New chain info 1: {:?}", + get_chain_info(&signer_test.running_nodes.conf) + ); + + info!("New chain info 2: {:?}", get_chain_info(&conf_node_2)); + + let peer_1_height = get_chain_info(&conf).stacks_tip_height; + let peer_2_height = get_chain_info(&conf_node_2).stacks_tip_height; + info!("Peer height information"; "peer_1" => peer_1_height, "peer_2" => peer_2_height, "pre_naka_height" => pre_nakamoto_peer_1_height); + assert_eq!(peer_1_height, peer_2_height); + assert_eq!( + peer_1_height, + pre_nakamoto_peer_1_height + (btc_blocks_mined - 1) * (inter_blocks_per_tenure + 1) + ); + assert_eq!(btc_blocks_mined, miner_1_tenures + miner_2_tenures); + + // Verify both nodes have the correct chain id + let miner1_info = get_chain_info(&signer_test.running_nodes.conf); + assert_eq!(miner1_info.network_id, chain_id); + + let miner2_info = get_chain_info(&conf_node_2); + assert_eq!(miner2_info.network_id, chain_id); + + rl2_coord_channels + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper_2.store(false, Ordering::SeqCst); + run_loop_2_thread.join().unwrap(); + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// This test checks the behavior of the `block_commit_delay_ms` configuration option. 
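+/// The delay is set to ten minutes while the signers reject every proposal, so a burn
+/// block that yields no Stacks block must produce no commit; once signing resumes and
+/// a block is mined, the commit should follow.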
+fn block_commit_delay() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let block_proposal_timeout = Duration::from_secs(20);
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![],
+        |config| {
+            // make the duration long enough that the miner will be marked as malicious
+            config.block_proposal_timeout = block_proposal_timeout;
+        },
+        |config| {
+            // Set the block commit delay to 10 minutes to ensure no block commit is sent
+            config.miner.block_commit_delay = Duration::from_secs(600);
+        },
+        None,
+        None,
+    );
+
+    signer_test.boot_to_epoch_3();
+
+    let commits_before = signer_test
+        .running_nodes
+        .commits_submitted
+        .load(Ordering::SeqCst);
+
+    next_block_and_process_new_stacks_block(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        &signer_test.running_nodes.coord_channel,
+    )
+    .expect("Failed to mine first block");
+
+    // Ensure that the block commit has been sent before continuing
+    wait_for(60, || {
+        let commits = signer_test
+            .running_nodes
+            .commits_submitted
+            .load(Ordering::SeqCst);
+        Ok(commits > commits_before)
+    })
+    .expect("Timed out waiting for block commit after new Stacks block");
+
+    // Prevent a block from being mined by making signers reject it.
+    let all_signers = signer_test
+        .signer_stacks_private_keys
+        .iter()
+        .map(StacksPublicKey::from_private)
+        .collect::<Vec<_>>();
+    TEST_REJECT_ALL_BLOCK_PROPOSAL
+        .lock()
+        .unwrap()
+        .replace(all_signers);
+
+    info!("------------------------- Test Mine Burn Block -------------------------");
+    let burn_height_before = get_chain_info(&signer_test.running_nodes.conf).burn_block_height;
+    let commits_before = signer_test
+        .running_nodes
+        .commits_submitted
+        .load(Ordering::SeqCst);
+
+    // Mine a burn block and wait for it to be processed.
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || {
+            let burn_height = get_chain_info(&signer_test.running_nodes.conf).burn_block_height;
+            Ok(burn_height > burn_height_before)
+        },
+    )
+    .unwrap();
+
+    // Sleep an extra minute to ensure no block commits are sent
+    sleep_ms(60_000);
+
+    let commits = signer_test
+        .running_nodes
+        .commits_submitted
+        .load(Ordering::SeqCst);
+    assert_eq!(commits, commits_before);
+
+    let blocks_before = signer_test
+        .running_nodes
+        .nakamoto_blocks_mined
+        .load(Ordering::SeqCst);
+
+    info!("------------------------- Resume Signing -------------------------");
+    TEST_REJECT_ALL_BLOCK_PROPOSAL
+        .lock()
+        .unwrap()
+        .replace(Vec::new());
+
+    // Wait for a block to be mined
+    wait_for(60, || {
+        let blocks = signer_test
+            .running_nodes
+            .nakamoto_blocks_mined
+            .load(Ordering::SeqCst);
+        Ok(blocks > blocks_before)
+    })
+    .expect("Timed out waiting for block to be mined");
+
+    // Wait for a block commit to be sent
+    wait_for(60, || {
+        let commits = signer_test
+            .running_nodes
+            .commits_submitted
+            .load(Ordering::SeqCst);
+        Ok(commits > commits_before)
+    })
+    .expect("Timed out waiting for block commit after new Stacks block");
+
+    signer_test.shutdown();
+}
+
+// Ensures that a signer that successfully submits a block to the node for validation
+// will issue ConnectivityIssues rejections if the block validation response times out.
+// Also ensures that no other proposal gets submitted for validation if we
+// are already waiting for a block submission response.
+#[test]
+#[ignore]
+fn block_validation_response_timeout() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    tracing_subscriber::registry()
+        .with(fmt::layer())
+        .with(EnvFilter::from_default_env())
+        .init();
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let timeout = Duration::from_secs(30);
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![(sender_addr, send_amt + send_fee)],
+        |config| {
+            config.block_proposal_validation_timeout = timeout;
+        },
+        |_| {},
+        None,
+        None,
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+    signer_test.boot_to_epoch_3();
+
+    info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------");
+    signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers);
+    info!("------------------------- Test Block Validation Stalled -------------------------");
+    TEST_VALIDATE_STALL.lock().unwrap().replace(true);
+    let validation_stall_start = Instant::now();
+
+    let proposals_before = signer_test
+        .running_nodes
+        .nakamoto_blocks_proposed
+        .load(Ordering::SeqCst);
+
+    // submit a tx so that the miner will attempt to mine an extra block
+    let sender_nonce = 0;
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+
+    info!("Submitted transfer tx and waiting for block proposal");
+    wait_for(30, || {
+        Ok(signer_test
+            .running_nodes
+            .nakamoto_blocks_proposed
+            .load(Ordering::SeqCst)
+            > proposals_before)
+    })
+    .expect("Timed out waiting for block proposal");
+
+    assert!(
+        validation_stall_start.elapsed() < timeout,
+        "Test was too slow to propose another block before the timeout"
+    );
+
+    info!("------------------------- Propose Another Block Before Hitting the Timeout -------------------------");
+    let proposal_conf = ProposalEvalConfig {
+        first_proposal_burn_block_timing: Duration::from_secs(0),
+        tenure_last_block_proposal_timeout: Duration::from_secs(30),
+        block_proposal_timeout: Duration::from_secs(100),
+    };
+    let mut block = NakamotoBlock {
+        header: NakamotoBlockHeader::empty(),
+        txs: vec![],
+    };
+
+    let info_before = get_chain_info(&signer_test.running_nodes.conf);
+    // Propose a block to the signers that passes initial checks but will not be submitted to the stacks node due to the submission stall
+    let view = SortitionsView::fetch_view(proposal_conf, &signer_test.stacks_client).unwrap();
+    block.header.pox_treatment = BitVec::ones(1).unwrap();
+    block.header.consensus_hash = view.cur_sortition.consensus_hash;
+    block.header.chain_length = info_before.stacks_tip_height + 1;
+
+    let block_signer_signature_hash_1 = block.header.signer_signature_hash();
+    signer_test.propose_block(block, timeout);
+
+    info!("------------------------- Waiting for Timeout -------------------------");
+    // Sleep the necessary timeout to make sure the validation times out.
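+    // The stall began before the second proposal, so only the remaining portion of
+    // `block_proposal_validation_timeout` needs to elapse here.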
+    let elapsed = validation_stall_start.elapsed();
+    let wait = timeout.saturating_sub(elapsed);
+    info!("Sleeping for {} ms", wait.as_millis());
+    std::thread::sleep(timeout.saturating_sub(elapsed));
+
+    info!("------------------------- Wait for Block Rejection Due to Timeout -------------------------");
+    // Verify that the signer that submits the block to the node will issue a ConnectivityIssues rejection
+    wait_for(30, || {
+        let chunks = test_observer::get_stackerdb_chunks();
+        for chunk in chunks.into_iter().flat_map(|chunk| chunk.modified_slots) {
+            let Ok(message) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice())
+            else {
+                continue;
+            };
+            let SignerMessage::BlockResponse(BlockResponse::Rejected(BlockRejection {
+                reason: _reason,
+                reason_code,
+                signer_signature_hash,
+                ..
+            })) = message
+            else {
+                continue;
+            };
+            // We are waiting for the original block proposal, which will have a different
+            // signature from our second proposed block.
+            assert_ne!(
+                signer_signature_hash, block_signer_signature_hash_1,
+                "Received a rejection for the wrong block"
+            );
+            if matches!(reason_code, RejectCode::ConnectivityIssues) {
+                return Ok(true);
+            }
+        }
+        Ok(false)
+    })
+    .expect("Timed out waiting for block proposal rejections");
+    // Make sure our chain has still not advanced
+    let info_after = get_chain_info(&signer_test.running_nodes.conf);
+    assert_eq!(info_before, info_after);
+    let info_before = info_after;
+    info!("Unpausing block validation");
+    // Disable the stall and wait for the block to be processed successfully
+    TEST_VALIDATE_STALL.lock().unwrap().replace(false);
+    wait_for(30, || {
+        let info = get_chain_info(&signer_test.running_nodes.conf);
+        Ok(info.stacks_tip_height > info_before.stacks_tip_height)
+    })
+    .expect("Timed out waiting for block to be processed");
+
+    let info_after = get_chain_info(&signer_test.running_nodes.conf);
+    assert_eq!(
+        info_after.stacks_tip_height,
+        info_before.stacks_tip_height + 1,
+    );
+    info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------");
+    let info_before = info_after;
+    signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers);
+
+    wait_for(30, || {
+        let info = get_chain_info(&signer_test.running_nodes.conf);
+        Ok(info.stacks_tip_height > info_before.stacks_tip_height)
+    })
+    .unwrap();
+
+    let info_after = get_chain_info(&signer_test.running_nodes.conf);
+    assert_eq!(
+        info_after.stacks_tip_height,
+        info_before.stacks_tip_height + 1,
+    );
+}
+
+#[test]
+#[ignore]
+/// Test that a miner will extend its tenure after the succeeding miner fails to mine a block.
+/// - Miner 1 wins a tenure and mines normally
+/// - Miner 2 wins a tenure but fails to mine a block
+/// - Miner 1 extends its tenure
+fn tenure_extend_after_failed_miner() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let num_signers = 5;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let num_txs = 2;
+    let mut sender_nonce = 0;
+
+    let btc_miner_1_seed = vec![1, 1, 1, 1];
+    let btc_miner_2_seed = vec![2, 2, 2, 2];
+    let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key();
+    let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key();
+
+    let node_1_rpc = gen_random_port();
+    let node_1_p2p = gen_random_port();
+    let node_2_rpc = gen_random_port();
+    let node_2_p2p = gen_random_port();
+
+    let localhost = "127.0.0.1";
+    let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}");
+    let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}");
+    let mut node_2_listeners = Vec::new();
+
+    let max_nakamoto_tenures = 30;
+
+    info!("------------------------- Test Setup -------------------------");
+    // partition the signer set so that ~half are listening and using node 1 for RPC and events,
+    // and the rest are using node 2
+
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![(sender_addr, (send_amt + send_fee) * num_txs)],
+        |signer_config| {
+            let node_host = if signer_config.endpoint.port() % 2 == 0 {
+                &node_1_rpc_bind
+            } else {
+                &node_2_rpc_bind
+            };
+            signer_config.node_host = node_host.to_string();
+            signer_config.block_proposal_timeout = Duration::from_secs(30);
+        },
+        |config| {
+            config.node.rpc_bind = format!("{localhost}:{node_1_rpc}");
+            config.node.p2p_bind = format!("{localhost}:{node_1_p2p}");
+            config.node.data_url = format!("http://{localhost}:{node_1_rpc}");
+            config.node.p2p_address = format!("{localhost}:{node_1_p2p}");
+            config.miner.wait_on_interim_blocks = Duration::from_secs(5);
+            config.node.pox_sync_sample_secs = 30;
+            config.burnchain.pox_reward_length = Some(max_nakamoto_tenures);
+
+            config.node.seed = btc_miner_1_seed.clone();
+            config.node.local_peer_seed = btc_miner_1_seed.clone();
+            config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex());
+            config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1]));
+
+            config.events_observers.retain(|listener| {
+                let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else {
+                    warn!(
+                        "Cannot parse {} to a socket, assuming it isn't a signer-listener binding",
+                        listener.endpoint
+                    );
+                    return true;
+                };
+                if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT {
+                    return true;
+                }
+                node_2_listeners.push(listener.clone());
+                false
+            })
+        },
+        Some(vec![btc_miner_1_pk, btc_miner_2_pk]),
+        None,
+    );
+    let conf = signer_test.running_nodes.conf.clone();
+    let mut conf_node_2 = conf.clone();
+    conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}");
+    conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}");
+    conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}");
+    conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}");
+    conf_node_2.node.seed = btc_miner_2_seed.clone();
+    conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex());
+    conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone();
+    conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2]));
+    conf_node_2.node.miner = true;
+    conf_node_2.events_observers.clear();
+    conf_node_2.events_observers.extend(node_2_listeners);
+    assert!(!conf_node_2.events_observers.is_empty());
+
+    let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed);
+    let node_1_pk = StacksPublicKey::from_private(&node_1_sk);
+
+    conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir);
+
+    conf_node_2.node.set_bootstrap_nodes(
+        format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind),
+        conf.burnchain.chain_id,
+        conf.burnchain.peer_version,
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+
+    let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap();
+    let run_loop_stopper_2 = run_loop_2.get_termination_switch();
+    let rl2_coord_channels = run_loop_2.coordinator_channels();
+    let Counters {
+        naka_submitted_commits: rl2_commits,
+        naka_skip_commit_op: rl2_skip_commit_op,
+        ..
+    } = run_loop_2.counters();
+
+    let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone();
+
+    info!("------------------------- Pause Miner 2's Block Commits -------------------------");
+
+    // Make sure Miner 2 cannot win a sortition at first.
+    rl2_skip_commit_op.set(true);
+
+    info!("------------------------- Boot to Epoch 3.0 -------------------------");
+
+    let run_loop_2_thread = thread::Builder::new()
+        .name("run_loop_2".into())
+        .spawn(move || run_loop_2.start(None, 0))
+        .unwrap();
+
+    signer_test.boot_to_epoch_3();
+
+    wait_for(120, || {
+        let Some(node_1_info) = get_chain_info_opt(&conf) else {
+            return Ok(false);
+        };
+        let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else {
+            return Ok(false);
+        };
+        Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height)
+    })
+    .expect("Timed out waiting for bootstrapped node to catch up to the miner");
+
+    let mining_pkh_1 = Hash160::from_node_public_key(&StacksPublicKey::from_private(
+        &conf.miner.mining_key.unwrap(),
+    ));
+    let mining_pkh_2 = Hash160::from_node_public_key(&StacksPublicKey::from_private(
+        &conf_node_2.miner.mining_key.unwrap(),
+    ));
+    debug!("The mining key for miner 1 is {mining_pkh_1}");
+    debug!("The mining key for miner 2 is {mining_pkh_2}");
+
+    info!("------------------------- Reached Epoch 3.0 -------------------------");
+
+    let burnchain = signer_test.running_nodes.conf.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
+
+    let get_burn_height = || {
+        SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())
+            .unwrap()
+            .block_height
+    };
+
+    info!("------------------------- Pause Miner 1's Block Commit -------------------------");
+    // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block
+    signer_test
+        .running_nodes
+        .nakamoto_test_skip_commit_op
+        .set(true);
+
+    info!("------------------------- Miner 1 Wins Normal Tenure A -------------------------");
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+
+    signer_test
+        .running_nodes
+        .btc_regtest_controller
+        .build_next_block(1);
+
+    // assure we have a successful sortition that miner A won
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert!(tip.sortition);
+
+    assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1);
+
+    // wait for the new block to be processed
+    wait_for(60, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .unwrap();
+
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound);
+
+    info!("------------------------- Miner 1 Mines Another Block -------------------------");
+
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+
+    // submit a tx so that the miner will mine an extra block
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+    sender_nonce += 1;
+
+    // wait for the new block to be processed
+    wait_for(30, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for block to be mined and processed");
+
+    info!("------------------------- Pause Block Proposals -------------------------");
+    TEST_MINE_STALL.lock().unwrap().replace(true);
+
+    // Unpause miner 2's block commits
+    let rl2_commits_before = rl2_commits.load(Ordering::SeqCst);
+    rl2_skip_commit_op.set(false);
+
+    // Ensure miner 2 submits a block commit before mining the bitcoin block
+    wait_for(30, || {
+        Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before)
+    })
+    .unwrap();
+
+    info!("------------------------- Miner 2 Wins Tenure B, Mines No Blocks -------------------------");
+
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+    let burn_height_before = get_burn_height();
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || Ok(get_burn_height() > burn_height_before),
+    )
+    .unwrap();
+
+    // ensure we have a successful sortition that miner B won
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert!(tip.sortition);
+    assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2);
+
+    info!("------------------------- Wait for Block Proposal Timeout -------------------------");
+    sleep_ms(
+        signer_test.signer_configs[0]
+            .block_proposal_timeout
+            .as_millis() as u64
+            * 2,
+    );
+
+    info!("------------------------- Miner 1 Extends Tenure A -------------------------");
+
+    // Re-enable block mining
+    TEST_MINE_STALL.lock().unwrap().replace(false);
+
+    // wait for a tenure extend block from miner 1 to be processed
+    wait_for(60, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for tenure extend block to be mined and processed");
+
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended);
+
+    info!("------------------------- Miner 1 Mines Another Block -------------------------");
+
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+
+    // submit a tx so that the miner will mine an extra block
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+
+    // wait for the new block to be processed
+    wait_for(30, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for block to be mined and processed");
+
+    // Re-enable block commits for miner 2
+    let rl2_commits_before = rl2_commits.load(Ordering::SeqCst);
+    rl2_skip_commit_op.set(false);
+
+    // Wait for block commit from miner 2
+    wait_for(30, || {
+        Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before)
+    })
+    .expect("Timed out waiting for block commit from miner 2");
+
+    info!("------------------------- Miner 2 Mines the Next Tenure -------------------------");
+
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || {
+            let stacks_height = signer_test
+                .stacks_client
+                .get_peer_info()
+                .expect("Failed to get peer info")
+                .stacks_tip_height;
+            Ok(stacks_height > stacks_height_before)
+        },
+    )
+    .expect("Timed out waiting for final block to be mined and processed");
+
+    info!("------------------------- Shutdown -------------------------");
+    rl2_coord_channels
+        .lock()
+        .expect("Mutex poisoned")
+        .stop_chains_coordinator();
+    run_loop_stopper_2.store(false, Ordering::SeqCst);
+    run_loop_2_thread.join().unwrap();
+    signer_test.shutdown();
+}
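The polling pattern above (`wait_for(timeout, predicate).expect(...)`) does the heavy lifting in all of these tests. A minimal sketch of such a helper, assuming the `(timeout_secs, fallible predicate)` shape used throughout this file (the in-tree version may differ in its sleep interval and error type):

    use std::time::{Duration, Instant};

    /// Poll `check` until it returns Ok(true), it returns Err, or `timeout_secs` elapses.
    fn wait_for<F>(timeout_secs: u64, mut check: F) -> Result<(), String>
    where
        F: FnMut() -> Result<bool, String>,
    {
        let deadline = Instant::now() + Duration::from_secs(timeout_secs);
        while Instant::now() < deadline {
            if check()? {
                return Ok(());
            }
            std::thread::sleep(Duration::from_millis(100));
        }
        Err(format!("Timed out after {timeout_secs} seconds"))
    }

Callers either `.unwrap()` or `.expect(...)` the result, as the tests above do at every step.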
+
+#[test]
+#[ignore]
+/// Test that a miner will extend its tenure after the succeeding miner commits to the wrong block.
+/// - Miner 1 wins a tenure and mines normally
+/// - Miner 1 wins another tenure and mines normally, but miner 2 does not see any blocks from this tenure
+/// - Miner 2 wins a tenure and is unable to mine a block
+/// - Miner 1 extends its tenure and mines an additional block
+/// - Miner 2 wins the next tenure and mines normally
+fn tenure_extend_after_bad_commit() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let num_signers = 5;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    let num_txs = 2;
+    let mut sender_nonce = 0;
+
+    let btc_miner_1_seed = vec![1, 1, 1, 1];
+    let btc_miner_2_seed = vec![2, 2, 2, 2];
+    let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key();
+    let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key();
+
+    let node_1_rpc = gen_random_port();
+    let node_1_p2p = gen_random_port();
+    let node_2_rpc = gen_random_port();
+    let node_2_p2p = gen_random_port();
+
+    let localhost = "127.0.0.1";
+    let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}");
+    let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}");
+    let mut node_2_listeners = Vec::new();
+
+    let max_nakamoto_tenures = 30;
+
+    info!("------------------------- Test Setup -------------------------");
+    // partition the signer set so that ~half are listening and using node 1 for RPC and events,
+    // and the rest are using node 2
+
+    let first_proposal_burn_block_timing = Duration::from_secs(1);
+
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![(sender_addr, (send_amt + send_fee) * num_txs)],
+        |signer_config| {
+            let node_host = if signer_config.endpoint.port() % 2 == 0 {
+                &node_1_rpc_bind
+            } else {
+                &node_2_rpc_bind
+            };
+            signer_config.node_host = node_host.to_string();
+            signer_config.block_proposal_timeout = Duration::from_secs(30);
+            signer_config.first_proposal_burn_block_timing = first_proposal_burn_block_timing;
+        },
+        |config| {
+            config.node.rpc_bind = format!("{localhost}:{node_1_rpc}");
+            config.node.p2p_bind = format!("{localhost}:{node_1_p2p}");
+            config.node.data_url = format!("http://{localhost}:{node_1_rpc}");
+            config.node.p2p_address = format!("{localhost}:{node_1_p2p}");
+            config.miner.wait_on_interim_blocks = Duration::from_secs(5);
+            config.node.pox_sync_sample_secs = 30;
+            config.burnchain.pox_reward_length = Some(max_nakamoto_tenures);
+
+            config.node.seed = btc_miner_1_seed.clone();
+            config.node.local_peer_seed = btc_miner_1_seed.clone();
+            config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex());
+            config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1]));
+
+            config.events_observers.retain(|listener| {
+                let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else {
+                    warn!(
+                        "Cannot parse {} to a socket, assuming it isn't a signer-listener binding",
+                        listener.endpoint
+                    );
+                    return true;
+                };
+                if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT {
+                    return true;
+                }
+                node_2_listeners.push(listener.clone());
+                false
+            })
+        },
+        Some(vec![btc_miner_1_pk, btc_miner_2_pk]),
+        None,
+    );
+
+    let rl1_commits = signer_test.running_nodes.commits_submitted.clone();
+    let rl1_skip_commit_op = signer_test
+        .running_nodes
+        .nakamoto_test_skip_commit_op
+        .clone();
+
+    let conf = signer_test.running_nodes.conf.clone();
+    let mut conf_node_2 = conf.clone();
+    conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}");
+    conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}");
+    conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}");
+    conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}");
+    conf_node_2.node.seed = btc_miner_2_seed.clone();
+    conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex());
+    conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone();
+    conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2]));
+    conf_node_2.node.miner = true;
+    conf_node_2.events_observers.clear();
+    conf_node_2.events_observers.extend(node_2_listeners);
+    assert!(!conf_node_2.events_observers.is_empty());
+
+    let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed);
+    let node_1_pk = StacksPublicKey::from_private(&node_1_sk);
+
+    conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir);
+
+    conf_node_2.node.set_bootstrap_nodes(
+        format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind),
+        conf.burnchain.chain_id,
+        conf.burnchain.peer_version,
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+
+    let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap();
+    let run_loop_stopper_2 = run_loop_2.get_termination_switch();
+    let rl2_coord_channels = run_loop_2.coordinator_channels();
+    let Counters {
+        naka_submitted_commits: rl2_commits,
+        naka_skip_commit_op: rl2_skip_commit_op,
+        ..
+    } = run_loop_2.counters();
+
+    let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone();
+
+    info!("------------------------- Pause Miner 2's Block Commits -------------------------");
+
+    // Make sure Miner 2 cannot win a sortition at first.
+    rl2_skip_commit_op.set(true);
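`naka_skip_commit_op` acts as a shared on/off switch that the relayer consults before submitting a block commit; setting it to `true` keeps the corresponding miner from winning sortitions. A rough sketch of that kind of test flag, assuming a simple `Arc<Mutex<...>>` representation (the in-tree counter type may be implemented differently):

    use std::sync::{Arc, Mutex};

    /// A cloneable switch shared between the test thread and the miner's relayer thread.
    #[derive(Clone, Default)]
    struct TestFlag(Arc<Mutex<Option<bool>>>);

    impl TestFlag {
        fn set(&self, value: bool) {
            *self.0.lock().unwrap() = Some(value);
        }

        fn get(&self) -> bool {
            self.0.lock().unwrap().unwrap_or(false)
        }
    }

    // In the relayer loop, a commit would be skipped while the flag reads true:
    // if skip_commit_op.get() { /* do not submit the block commit */ }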
+
+    info!("------------------------- Boot to Epoch 3.0 -------------------------");
+
+    let run_loop_2_thread = thread::Builder::new()
+        .name("run_loop_2".into())
+        .spawn(move || run_loop_2.start(None, 0))
+        .unwrap();
+
+    signer_test.boot_to_epoch_3();
+
+    wait_for(120, || {
+        let Some(node_1_info) = get_chain_info_opt(&conf) else {
+            return Ok(false);
+        };
+        let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else {
+            return Ok(false);
+        };
+        Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height)
+    })
+    .expect("Timed out waiting for bootstrapped node to catch up to the miner");
+
+    let mining_pkh_1 = Hash160::from_node_public_key(&StacksPublicKey::from_private(
+        &conf.miner.mining_key.unwrap(),
+    ));
+    let mining_pkh_2 = Hash160::from_node_public_key(&StacksPublicKey::from_private(
+        &conf_node_2.miner.mining_key.unwrap(),
+    ));
+    debug!("The mining key for miner 1 is {mining_pkh_1}");
+    debug!("The mining key for miner 2 is {mining_pkh_2}");
+
+    info!("------------------------- Reached Epoch 3.0 -------------------------");
+
+    let burnchain = signer_test.running_nodes.conf.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
+
+    let get_burn_height = || {
+        let sort_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())
+            .unwrap()
+            .block_height;
+        let info_1 = get_chain_info(&conf);
+        let info_2 = get_chain_info(&conf_node_2);
+        min(
+            sort_height,
+            min(info_1.burn_block_height, info_2.burn_block_height),
+        )
+    };
+
+    info!("------------------------- Pause Miner 1's Block Commit -------------------------");
+    // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block
+    rl1_skip_commit_op.set(true);
+
+    info!("------------------------- Miner 1 Wins Normal Tenure A -------------------------");
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+
+    signer_test
+        .running_nodes
+        .btc_regtest_controller
+        .build_next_block(1);
+
+    // ensure we have a successful sortition that miner A won
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert!(tip.sortition);
+    assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1);
+
+    // wait for the new block to be processed
+    wait_for(60, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        let info_2 = get_chain_info(&conf_node_2);
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && info_2.stacks_tip_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .unwrap();
+
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound);
+
+    info!("------------------------- Miner 1 Mines Another Block -------------------------");
+
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+
+    // submit a tx so that the miner will mine an extra block
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+    sender_nonce += 1;
+
+    // wait for the new block to be processed
+    wait_for(30, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        let info_2 = get_chain_info(&conf_node_2);
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && info_2.stacks_tip_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for block to be mined and processed");
+
+    info!("------------------------- Pause Block Proposals -------------------------");
+    TEST_MINE_STALL.lock().unwrap().replace(true);
+
+    // Unpause miner 1's block commits
+    let rl1_commits_before = rl1_commits.load(Ordering::SeqCst);
+    rl1_skip_commit_op.set(false);
+
+    // Ensure miner 1 submits a block commit before mining the bitcoin block
+    wait_for(30, || {
+        Ok(rl1_commits.load(Ordering::SeqCst) > rl1_commits_before)
+    })
+    .unwrap();
+
+    rl1_skip_commit_op.set(true);
+
+    info!("------------------------- Miner 1 Wins Tenure B -------------------------");
+
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+    let burn_height_before = get_burn_height();
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || Ok(get_burn_height() > burn_height_before),
+    )
+    .unwrap();
+
+    // ensure we have a successful sortition that miner 1 won
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert!(tip.sortition);
+    assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1);
+
+    info!("----------------- Miner 2 Submits Block Commit Before Any Blocks ------------------");
+
+    let rl2_commits_before = rl2_commits.load(Ordering::SeqCst);
+    rl2_skip_commit_op.set(false);
+
+    wait_for(30, || {
+        Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before)
+    })
+    .expect("Timed out waiting for block commit from miner 2");
+
+    // Re-pause block commits for miner 2 so that it cannot RBF its original commit
+    rl2_skip_commit_op.set(true);
+
+    info!("----------------------------- Resume Block Production -----------------------------");
+
+    TEST_MINE_STALL.lock().unwrap().replace(false);
+
+    wait_for(60, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        let info_2 = get_chain_info(&conf_node_2);
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && info_2.stacks_tip_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for block to be mined and processed");
+
+    info!("--------------- Miner 2 Wins Tenure C With Old Block Commit ----------------");
+
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+    let burn_height_before = get_burn_height();
+
+    // Sleep enough time to pass the first proposal burn block timing
+    let sleep_duration = first_proposal_burn_block_timing.saturating_add(Duration::from_secs(2));
+    info!(
+        "Sleeping for {} seconds before issuing next burn block.",
+        sleep_duration.as_secs()
+    );
+    thread::sleep(sleep_duration);
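The sleep matters because signers use `first_proposal_burn_block_timing` to judge whether a first block proposal that reorgs blocks from a prior tenure came soon enough after its burn block to be acceptable. A rough sketch of that kind of timing check; the function and parameter names here are illustrative assumptions, not the signer's actual API:

    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    /// Hypothetical: accept a tenure-reorging first proposal only if its burn
    /// block is at most `timing` old.
    fn reorg_within_timing(burn_block_timestamp_secs: u64, timing: Duration) -> bool {
        let now_secs = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock before UNIX epoch")
            .as_secs();
        now_secs.saturating_sub(burn_block_timestamp_secs) <= timing.as_secs()
    }

By sleeping past the window before triggering the tenure C burn block, the test arranges for the signers to reject miner 2's proposal built from the stale commit.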
+
+    info!("--------------- Triggering new burn block for tenure C ---------------");
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || Ok(get_burn_height() > burn_height_before),
+    )
+    .expect("Timed out waiting for burn block to be processed");
+
+    // ensure we have a successful sortition that miner 2 won
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert!(tip.sortition);
+    assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2);
+
+    info!("------------------------- Miner 1 Extends Tenure B -------------------------");
+
+    // wait for a tenure extend block from miner 1 to be processed
+    // (miner 2's proposals will be rejected)
+    wait_for(60, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        let info_2 = get_chain_info(&conf_node_2);
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && info_2.stacks_tip_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for tenure extend block to be mined and processed");
+
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended);
+
+    info!("------------------------- Miner 1 Mines Another Block -------------------------");
+
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+
+    // submit a tx so that the miner will mine an extra block
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+
+    // wait for the new block to be processed
+    wait_for(30, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        let info_2 = get_chain_info(&conf_node_2);
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && info_2.stacks_tip_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for block to be mined and processed");
+
+    info!("------------------------- Miner 2 Mines the Next Tenure -------------------------");
+
+    // Re-enable block commits for miner 2
+    let rl2_commits_before = rl2_commits.load(Ordering::SeqCst);
+    rl2_skip_commit_op.set(false);
+
+    // Wait for block commit from miner 2
+    wait_for(30, || {
+        Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before)
+    })
+    .expect("Timed out waiting for block commit from miner 2");
+
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || {
+            let stacks_height = signer_test
+                .stacks_client
+                .get_peer_info()
+                .expect("Failed to get peer info")
+                .stacks_tip_height;
+            let info_2 = get_chain_info(&conf_node_2);
+            Ok(stacks_height > stacks_height_before
+                && info_2.stacks_tip_height > stacks_height_before)
+        },
+    )
+    .expect("Timed out waiting for final block to be mined and processed");
+
+    // ensure we have a successful sortition that miner 2 won and it had a block found tenure change
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert!(tip.sortition);
+    assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2);
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound);
+
+    info!("------------------------- Shutdown -------------------------");
+    rl2_coord_channels
+        .lock()
+        .expect("Mutex poisoned")
+        .stop_chains_coordinator();
+    run_loop_stopper_2.store(false, Ordering::SeqCst);
+    run_loop_2_thread.join().unwrap();
+    signer_test.shutdown();
+}
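`next_block_and` couples a burnchain block trigger with the same polling idiom. A sketch of the pattern as it is used here, reusing the `wait_for` shape from earlier (an assumed shape, not the exact in-tree body):

    /// Mine one bitcoin block, then poll `check` until it passes or `timeout_secs` elapses.
    fn next_block_and<F>(
        btc_controller: &mut BitcoinRegtestController,
        timeout_secs: u64,
        check: F,
    ) -> Result<(), String>
    where
        F: FnMut() -> Result<bool, String>,
    {
        btc_controller.build_next_block(1);
        wait_for(timeout_secs, check)
    }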
+
+#[test]
+#[ignore]
+/// Test that a miner will extend its tenure after the succeeding miner commits to the wrong block.
+/// - Miner 1 wins a tenure and mines normally
+/// - Miner 1 wins another tenure and mines normally, but miner 2 does not see any blocks from this tenure
+/// - Miner 2 wins a tenure and is unable to mine a block
+/// - Miner 1 extends its tenure and mines an additional block
+/// - Miner 2 wins another tenure and is still unable to mine a block
+/// - Miner 1 extends its tenure again and mines an additional block
+/// - Miner 2 wins the next tenure and mines normally
+fn tenure_extend_after_2_bad_commits() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let num_signers = 5;
+    let recipient = PrincipalData::from(StacksAddress::burn_address(false));
+    let sender_sk = Secp256k1PrivateKey::new();
+    let sender_addr = tests::to_addr(&sender_sk);
+    let send_amt = 100;
+    let send_fee = 180;
+    // this test submits three transfer txs, so fund all three
+    let num_txs = 3;
+    let mut sender_nonce = 0;
+
+    let btc_miner_1_seed = vec![1, 1, 1, 1];
+    let btc_miner_2_seed = vec![2, 2, 2, 2];
+    let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key();
+    let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key();
+
+    let node_1_rpc = gen_random_port();
+    let node_1_p2p = gen_random_port();
+    let node_2_rpc = gen_random_port();
+    let node_2_p2p = gen_random_port();
+
+    let localhost = "127.0.0.1";
+    let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}");
+    let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}");
+    let mut node_2_listeners = Vec::new();
+
+    let max_nakamoto_tenures = 30;
+
+    info!("------------------------- Test Setup -------------------------");
+    // partition the signer set so that ~half are listening and using node 1 for RPC and events,
+    // and the rest are using node 2
+
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![(sender_addr, (send_amt + send_fee) * num_txs)],
+        |signer_config| {
+            let node_host = if signer_config.endpoint.port() % 2 == 0 {
+                &node_1_rpc_bind
+            } else {
+                &node_2_rpc_bind
+            };
+            signer_config.node_host = node_host.to_string();
+            signer_config.block_proposal_timeout = Duration::from_secs(30);
+        },
+        |config| {
+            config.node.rpc_bind = format!("{localhost}:{node_1_rpc}");
+            config.node.p2p_bind = format!("{localhost}:{node_1_p2p}");
+            config.node.data_url = format!("http://{localhost}:{node_1_rpc}");
+            config.node.p2p_address = format!("{localhost}:{node_1_p2p}");
+            config.miner.wait_on_interim_blocks = Duration::from_secs(5);
+            config.node.pox_sync_sample_secs = 30;
+            config.burnchain.pox_reward_length = Some(max_nakamoto_tenures);
+
+            config.node.seed = btc_miner_1_seed.clone();
+            config.node.local_peer_seed = btc_miner_1_seed.clone();
+            config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex());
+            config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1]));
+
+            config.events_observers.retain(|listener| {
+                let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else {
+                    warn!(
+                        "Cannot parse {} to a socket, assuming it isn't a signer-listener binding",
+                        listener.endpoint
+                    );
+                    return true;
+                };
+                if addr.port() % 2 == 0 || addr.port() == test_observer::EVENT_OBSERVER_PORT {
+                    return true;
+                }
+                node_2_listeners.push(listener.clone());
+                false
+            })
+        },
+        Some(vec![btc_miner_1_pk, btc_miner_2_pk]),
+        None,
+    );
+
+    let rl1_commits = signer_test.running_nodes.commits_submitted.clone();
+    let rl1_skip_commit_op = signer_test
+        .running_nodes
+        .nakamoto_test_skip_commit_op
+        .clone();
+
+    let conf = signer_test.running_nodes.conf.clone();
+    let mut conf_node_2 = conf.clone();
+    conf_node_2.node.rpc_bind = format!("{localhost}:{node_2_rpc}");
+    conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}");
+    conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}");
+    conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}");
+    conf_node_2.node.seed = btc_miner_2_seed.clone();
+    conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex());
+    conf_node_2.node.local_peer_seed = btc_miner_2_seed.clone();
+    conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2]));
+    conf_node_2.node.miner = true;
+    conf_node_2.events_observers.clear();
+    conf_node_2.events_observers.extend(node_2_listeners);
+    assert!(!conf_node_2.events_observers.is_empty());
+
+    let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed);
+    let node_1_pk = StacksPublicKey::from_private(&node_1_sk);
+
+    conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir);
+
+    conf_node_2.node.set_bootstrap_nodes(
+        format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind),
+        conf.burnchain.chain_id,
+        conf.burnchain.peer_version,
+    );
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+
+    let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap();
+    let run_loop_stopper_2 = run_loop_2.get_termination_switch();
+    let rl2_coord_channels = run_loop_2.coordinator_channels();
+    let Counters {
+        naka_submitted_commits: rl2_commits,
+        naka_skip_commit_op: rl2_skip_commit_op,
+        ..
+    } = run_loop_2.counters();
+
+    let blocks_mined1 = signer_test.running_nodes.nakamoto_blocks_mined.clone();
+
+    info!("------------------------- Pause Miner 2's Block Commits -------------------------");
+
+    // Make sure Miner 2 cannot win a sortition at first.
+    rl2_skip_commit_op.set(true);
+
+    info!("------------------------- Boot to Epoch 3.0 -------------------------");
+
+    let run_loop_2_thread = thread::Builder::new()
+        .name("run_loop_2".into())
+        .spawn(move || run_loop_2.start(None, 0))
+        .unwrap();
+
+    signer_test.boot_to_epoch_3();
+
+    wait_for(120, || {
+        let Some(node_1_info) = get_chain_info_opt(&conf) else {
+            return Ok(false);
+        };
+        let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else {
+            return Ok(false);
+        };
+        Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height)
+    })
+    .expect("Timed out waiting for bootstrapped node to catch up to the miner");
+
+    let mining_pkh_1 = Hash160::from_node_public_key(&StacksPublicKey::from_private(
+        &conf.miner.mining_key.unwrap(),
+    ));
+    let mining_pkh_2 = Hash160::from_node_public_key(&StacksPublicKey::from_private(
+        &conf_node_2.miner.mining_key.unwrap(),
+    ));
+    debug!("The mining key for miner 1 is {mining_pkh_1}");
+    debug!("The mining key for miner 2 is {mining_pkh_2}");
+
+    info!("------------------------- Reached Epoch 3.0 -------------------------");
+
+    let burnchain = signer_test.running_nodes.conf.get_burnchain();
+    let sortdb = burnchain.open_sortition_db(true).unwrap();
+
+    let get_burn_height = || {
+        let sort_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())
+            .unwrap()
+            .block_height;
+        let info_1 = get_chain_info(&conf);
+        let info_2 = get_chain_info(&conf_node_2);
+        min(
+            sort_height,
+            min(info_1.burn_block_height, info_2.burn_block_height),
+        )
+    };
+
+    info!("------------------------- Pause Miner 1's Block Commit -------------------------");
+    // Make sure miner 1 doesn't submit any further block commits for the next tenure BEFORE mining the bitcoin block
+    rl1_skip_commit_op.set(true);
+
+    info!("------------------------- Miner 1 Wins Normal Tenure A -------------------------");
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+
+    signer_test
+        .running_nodes
+        .btc_regtest_controller
+        .build_next_block(1);
+
+    // ensure we have a successful sortition that miner A won
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert!(tip.sortition);
+    assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1);
+
+    // wait for the new block to be processed
+    wait_for(60, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .unwrap();
+
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound);
+
+    info!("------------------------- Miner 1 Mines Another Block -------------------------");
+
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+
+    // submit a tx so that the miner will mine an extra block
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+    sender_nonce += 1;
+
+    // wait for the new block to be processed
+    wait_for(30, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for block to be mined and processed");
+
+    info!("------------------------- Pause Block Proposals -------------------------");
+    TEST_MINE_STALL.lock().unwrap().replace(true);
+
+    // Unpause miner 1's block commits
+    let rl1_commits_before = rl1_commits.load(Ordering::SeqCst);
+    rl1_skip_commit_op.set(false);
+
+    // Ensure miner 1 submits a block commit before mining the bitcoin block
+    wait_for(30, || {
+        Ok(rl1_commits.load(Ordering::SeqCst) > rl1_commits_before)
+    })
+    .unwrap();
+
+    rl1_skip_commit_op.set(true);
+
+    info!("------------------------- Miner 1 Wins Tenure B -------------------------");
+
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+    let burn_height_before = get_burn_height();
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || Ok(get_burn_height() > burn_height_before),
+    )
+    .unwrap();
+
+    // ensure we have a successful sortition that miner 1 won
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert!(tip.sortition);
+    assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1);
+
+    info!("----------------- Miner 2 Submits Block Commit Before Any Blocks ------------------");
+
+    let rl2_commits_before = rl2_commits.load(Ordering::SeqCst);
+    rl2_skip_commit_op.set(false);
+
+    wait_for(30, || {
+        Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before)
+    })
+    .expect("Timed out waiting for block commit from miner 2");
+
+    // Re-pause block commits for miner 2 so that it cannot RBF its original commit
+    rl2_skip_commit_op.set(true);
+
+    info!("----------------------------- Resume Block Production -----------------------------");
+
+    TEST_MINE_STALL.lock().unwrap().replace(false);
+
+    wait_for(60, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for block to be mined and processed");
+
+    info!("--------------- Miner 2 Wins Tenure C With Old Block Commit ----------------");
+
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+    let burn_height_before = get_burn_height();
+
+    // Pause block production again so that we can make sure miner 2 commits
+    // to the wrong block again.
+    TEST_MINE_STALL.lock().unwrap().replace(true);
+
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || Ok(get_burn_height() > burn_height_before),
+    )
+    .expect("Timed out waiting for burn block to be processed");
+
+    // ensure we have a successful sortition that miner 2 won
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert!(tip.sortition);
+    assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2);
+
+    info!("---------- Miner 2 Submits Block Commit Before Any Blocks (again) ----------");
+
+    let rl2_commits_before = rl2_commits.load(Ordering::SeqCst);
+    rl2_skip_commit_op.set(false);
+
+    wait_for(30, || {
+        Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before)
+    })
+    .expect("Timed out waiting for block commit from miner 2");
+
+    // Re-pause block commits for miner 2 so that it cannot RBF its original commit
+    rl2_skip_commit_op.set(true);
+
+    info!("------------------------- Miner 1 Extends Tenure B -------------------------");
+
+    TEST_MINE_STALL.lock().unwrap().replace(false);
+
+    // wait for a tenure extend block from miner 1 to be processed
+    // (miner 2's proposals will be rejected)
+    wait_for(60, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for tenure extend block to be mined and processed");
+
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended);
+
+    info!("------------------------- Miner 1 Mines Another Block -------------------------");
+
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+
+    // submit a tx so that the miner will mine an extra block
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+    sender_nonce += 1;
+
+    // wait for the new block to be processed
+    wait_for(30, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for block to be mined and processed");
+
+    info!("------------ Miner 2 Wins Tenure C With Old Block Commit (again) -----------");
+
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+    let burn_height_before = get_burn_height();
+
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || Ok(get_burn_height() > burn_height_before),
+    )
+    .expect("Timed out waiting for burn block to be processed");
+
+    // ensure we have a successful sortition that miner 2 won
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert!(tip.sortition);
+    assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2);
+
+    wait_for(30, || {
+        Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before)
+    })
+    .expect("Timed out waiting for block commit from miner 2");
+
+    info!("---------------------- Miner 1 Extends Tenure B (again) ---------------------");
+
+    TEST_MINE_STALL.lock().unwrap().replace(false);
+
+    // wait for a tenure extend block from miner 1 to be processed
+    // (miner 2's proposals will be rejected)
+    wait_for(60, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for tenure extend block to be mined and processed");
+
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended);
+
+    info!("------------------------- Miner 1 Mines Another Block -------------------------");
+
+    let blocks_processed_before_1 = blocks_mined1.load(Ordering::SeqCst);
+    let nmb_old_blocks = test_observer::get_blocks().len();
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+
+    // submit a tx so that the miner will mine an extra block
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+
+    // wait for the new block to be processed
+    wait_for(30, || {
+        let stacks_height = signer_test
+            .stacks_client
+            .get_peer_info()
+            .expect("Failed to get peer info")
+            .stacks_tip_height;
+        Ok(
+            blocks_mined1.load(Ordering::SeqCst) > blocks_processed_before_1
+                && stacks_height > stacks_height_before
+                && test_observer::get_blocks().len() > nmb_old_blocks,
+        )
+    })
+    .expect("Timed out waiting for block to be mined and processed");
+
+    info!("----------------------- Miner 2 Mines the Next Tenure -----------------------");
+
+    // Re-enable block commits for miner 2
+    let rl2_commits_before = rl2_commits.load(Ordering::SeqCst);
+    rl2_skip_commit_op.set(false);
+
+    // Wait for block commit from miner 2
+    wait_for(30, || {
+        Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before)
+    })
+    .expect("Timed out waiting for block commit from miner 2");
+
+    let stacks_height_before = signer_test
+        .stacks_client
+        .get_peer_info()
+        .expect("Failed to get peer info")
+        .stacks_tip_height;
+
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        60,
+        || {
+            let stacks_height = signer_test
+                .stacks_client
+                .get_peer_info()
+                .expect("Failed to get peer info")
+                .stacks_tip_height;
+            Ok(stacks_height > stacks_height_before)
+        },
+    )
+    .expect("Timed out waiting for final block to be mined and processed");
+
+    // ensure we have a successful sortition that miner 2 won and it had a block found tenure change
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    assert!(tip.sortition);
+    assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_2);
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound);
+    info!("------------------------- Shutdown -------------------------");
     rl2_coord_channels
         .lock()
         .expect("Mutex poisoned")
         .stop_chains_coordinator();
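All three tests exercise the same miner-side rule: a sortition winner that committed to the wrong parent tenure can never get a block approved, so the previous miner may tenure extend immediately rather than waiting out the proposal timeout. A compressed, purely illustrative sketch of that decision (the names are stand-ins; the node's real logic lives in the relayer and is more involved):

    use stacks_common::types::chainstate::StacksBlockId;

    /// Illustrative only: decide whether the previous tenure's miner should
    /// issue a TenureExtend after a new sortition.
    fn previous_miner_should_extend(
        winner_committed_parent: &StacksBlockId,
        canonical_tip: &StacksBlockId,
        proposal_timed_out: bool,
    ) -> bool {
        // A winner committed to a stale parent cannot produce a valid block,
        // so there is no point waiting for it.
        winner_committed_parent != canonical_tip || proposal_timed_out
    }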
diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs
index aa620d349b..c68b477b47 100644
--- a/testnet/stacks-node/src/tests/stackerdb.rs
+++ b/testnet/stacks-node/src/tests/stackerdb.rs
@@ -41,14 +41,13 @@ fn post_stackerdb_chunk(
     slot_version: u32,
 ) -> StackerDBChunkAckData {
     let mut chunk = StackerDBChunkData::new(slot_id, slot_version, data);
-    chunk.sign(&signer).unwrap();
+    chunk.sign(signer).unwrap();
 
     let chunk_body = serde_json::to_string(&chunk).unwrap();
 
     let client = reqwest::blocking::Client::new();
     let path = format!(
-        "{}/v2/stackerdb/{}/{}/chunks",
-        http_origin,
+        "{http_origin}/v2/stackerdb/{}/{}/chunks",
         &StacksAddress::from(stackerdb_contract_id.issuer.clone()),
         stackerdb_contract_id.name
     );
@@ -60,8 +59,8 @@ fn post_stackerdb_chunk(
         .unwrap();
     if res.status().is_success() {
         let ack: StackerDBChunkAckData = res.json().unwrap();
-        info!("Got stackerdb ack: {:?}", &ack);
-        return ack;
+        info!("Got stackerdb ack: {ack:?}");
+        ack
     } else {
         eprintln!("StackerDB post error: {}", res.text().unwrap());
         panic!("");
@@ -76,20 +75,15 @@ fn get_stackerdb_chunk(
 ) -> Vec<u8> {
     let path = if let Some(version) = slot_version {
         format!(
-            "{}/v2/stackerdb/{}/{}/{}/{}",
-            http_origin,
+            "{http_origin}/v2/stackerdb/{}/{}/{slot_id}/{version}",
             StacksAddress::from(stackerdb_contract_id.issuer.clone()),
             stackerdb_contract_id.name,
-            slot_id,
-            version
         )
     } else {
         format!(
-            "{}/v2/stackerdb/{}/{}/{}",
-            http_origin,
+            "{http_origin}/v2/stackerdb/{}/{}/{slot_id}",
             StacksAddress::from(stackerdb_contract_id.issuer.clone()),
-            stackerdb_contract_id.name,
-            slot_id
+            stackerdb_contract_id.name
         )
     };
 
@@ -97,8 +91,7 @@ fn get_stackerdb_chunk(
     let res = client.get(&path).send().unwrap();
 
     if res.status().is_success() {
-        let chunk_data: Vec<u8> = res.bytes().unwrap().to_vec();
-        return chunk_data;
+        res.bytes().unwrap().to_vec()
     } else {
         eprintln!("Get chunk error: {}", res.text().unwrap());
         panic!("");
@@ -115,7 +108,7 @@ fn test_stackerdb_load_store() {
     let (mut conf, _) = neon_integration_test_conf();
     test_observer::register_any(&mut conf);
 
-    let privks = vec![
+    let privks = [
         // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R
         StacksPrivateKey::from_hex(
             "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001",
@@ -223,18 +216,18 @@ fn test_stackerdb_load_store() {
 
     // write some chunks and read them back
     for i in 0..3 {
-        let chunk_str = format!("Hello chunks {}", &i);
+        let chunk_str = format!("Hello chunks {i}");
         let ack = post_stackerdb_chunk(
             &http_origin,
             &contract_id,
             chunk_str.as_bytes().to_vec(),
             &privks[0],
             0,
-            (i + 1) as u32,
+            i + 1,
         );
-        debug!("ACK: {:?}", &ack);
+        debug!("ACK: {ack:?}");
 
-        let data = get_stackerdb_chunk(&http_origin, &contract_id, 0, Some((i + 1) as u32));
+        let data = get_stackerdb_chunk(&http_origin, &contract_id, 0, Some(i + 1));
         assert_eq!(data, chunk_str.as_bytes().to_vec());
 
         let data = get_stackerdb_chunk(&http_origin, &contract_id, 0, None);
@@ -252,7 +245,7 @@ fn test_stackerdb_event_observer() {
     let (mut conf, _) = neon_integration_test_conf();
     test_observer::register(&mut conf, &[EventKeyType::StackerDBChunks]);
 
-    let privks = vec![
+    let privks = [
         // ST2DS4MSWSGJ3W9FBC6BVT0Y92S345HY8N3T6AV7R
         StacksPrivateKey::from_hex(
             "9f1f85a512a96a244e4c0d762788500687feb97481639572e3bffbd6860e6ab001",
@@ -362,7 +355,7 @@ fn test_stackerdb_event_observer() {
     for i in 0..6 {
         let slot_id = i as u32;
         let privk = &privks[i / 3];
-        let chunk_str = format!("Hello chunks {}", &i);
+        let chunk_str = format!("Hello chunks {i}");
         let ack = post_stackerdb_chunk(
             &http_origin,
             &contract_id,
             chunk_str.as_bytes().to_vec(),
             privk,
             slot_id,
             1,
         );
-        debug!("ACK: {:?}", &ack);
+        debug!("ACK: {ack:?}");
 
         let data = get_stackerdb_chunk(&http_origin, &contract_id, slot_id, Some(1));
         assert_eq!(data, chunk_str.as_bytes().to_vec());
@@ -383,11 +376,10 @@ fn test_stackerdb_event_observer() {
     // get events, verifying that they're all for the same contract (i.e. this one)
     let stackerdb_events: Vec<_> = test_observer::get_stackerdb_chunks()
         .into_iter()
-        .map(|stackerdb_event| {
+        .flat_map(|stackerdb_event| {
            assert_eq!(stackerdb_event.contract_id, contract_id);
            stackerdb_event.modified_slots
         })
-        .flatten()
         .collect();
 
     assert_eq!(stackerdb_events.len(), 6);
@@ -396,7 +388,7 @@ fn test_stackerdb_event_observer() {
         assert_eq!(i as u32, event.slot_id);
         assert_eq!(event.slot_version, 1);
 
-        let expected_data = format!("Hello chunks {}", &i);
+        let expected_data = format!("Hello chunks {i}");
         let expected_hash = Sha512Trunc256Sum::from_data(expected_data.as_bytes());
 
         assert_eq!(event.data, expected_data.as_bytes().to_vec());